diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fdaa0c8628f766e241ba9043698b611a0bd78811..4fc5b97caae0735058337c8b23e4ca7471761d24 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -13,7 +13,7 @@ # - Multiple owners are supported. # - Either handle (e.g, @github_user or @github/team) or email can be used. Keep in mind, # that handles might work better because they are more recognizable on GitHub, -# eyou can use them for mentioning unlike an email. +# you can use them for mentioning unlike an email. # - The latest matching rule, if multiple, takes precedence. # CI diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 29dc269ffd23b1f51e1eb2b87a61544de0cbb57f..932a6d546c3706f50c74873c32cb8e61e3d33461 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,7 +237,7 @@ fetch_release_artifacts() { popd > /dev/null } -# Fetch the release artifacts like binary and sigantures from S3. Assumes the ENV are set: +# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: # - RELEASE_ID # - GITHUB_TOKEN # - REPO in the form paritytech/polkadot diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 903d7a3fcb3d94bb6913d94627418d9212397bf3..58065f369c9cf160b0b94c233df9df1016426d07 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -3,8 +3,8 @@ name: Check links on: pull_request: paths: - - "*.rs" - - "*.prdoc" + - "**.rs" + - "**.prdoc" - ".github/workflows/check-links.yml" - ".config/lychee.toml" types: [opened, synchronize, reopened, ready_for_review] diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/subsystem-benchmarks.yml new file mode 100644 index 0000000000000000000000000000000000000000..f0d56bf6e9d3937d2f7c16873ea34bf1377e2aa2 --- /dev/null +++ b/.github/workflows/subsystem-benchmarks.yml @@ -0,0 +1,54 @@ +# The action takes a JSON file as input and runs github-action-benchmark on it.
+ +on: + workflow_dispatch: + inputs: + benchmark-data-dir-path: + description: "Path to the benchmark data directory" + required: true + type: string + output-file-path: + description: "Path to the benchmark data file" + required: true + type: string + +jobs: + subsystem-benchmarks: + runs-on: ubuntu-latest + environment: subsystem-benchmarks + steps: + - name: Validate inputs + run: | + echo "${{ github.event.inputs.benchmark-data-dir-path }}" | grep -P '^[a-z\-]' + echo "${{ github.event.inputs.output-file-path }}" | grep -P '^[a-z\-]+\.json' + + - name: Checkout Sources + uses: actions/checkout@v4.1.2 + with: + fetch-depth: 0 + ref: "gh-pages" + + - name: Copy bench results + id: step_one + run: | + cp bench/gitlab/${{ github.event.inputs.output-file-path }} ${{ github.event.inputs.output-file-path }} + + - name: Switch branch + id: step_two + run: | + git checkout master -- + + - uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: ${{ secrets.POLKADOTSDK_GHPAGES_APP_ID }} + private-key: ${{ secrets.POLKADOTSDK_GHPAGES_APP_KEY }} + + - name: Store benchmark result + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + output-file-path: ${{ github.event.inputs.output-file-path }} + benchmark-data-dir-path: "bench/${{ github.event.inputs.benchmark-data-dir-path }}" + github-token: ${{ steps.app-token.outputs.token }} + auto-push: true diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7f8796ca51248acb8be92fcb9f76e280b18e605e..93a6ccb9f8fbabc48a31d403ae4ed17bc2c2966a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -147,6 +147,13 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues +.publish-gh-pages-refs: + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME == "master" + # handle the specific case where benches could store incorrect bench data because of the downstream staging runs # exclude cargo-check-benches from such runs .test-refs-check-benches: diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index f8de6135572565d9d16465e68aa3f0bace915cc5..44d66eb2f5eb73dcdab8dad2b4b35165cf9dc4c7 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -350,7 +350,7 @@ build-runtimes-polkavm: - .run-immediately # - .collect-artifact variables: - # this variable gets overriden by "rusty-cachier environment inject", use the value as default + # this variable gets overridden by "rusty-cachier environment inject", use the value as default CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" before_script: - mkdir -p ./artifacts/subkey diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index b73acb560f67f93e540826b95fcf075374189846..a37ba012a8a7678b2b7a453fb8d3dfb6d2582501 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -3,16 +3,13 @@ publish-rustdoc: stage: publish - extends: .kubernetes-env + extends: + - .kubernetes-env + - .publish-gh-pages-refs variables: CI_IMAGE: node:18 GIT_DEPTH: 100 RUSTDOCS_DEPLOY_REFS: "master" - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "master" needs: - job: build-rustdoc artifacts: true @@ -60,9 +57,76 @@ publish-rustdoc: - git commit -m "___Updated docs for ${CI_COMMIT_REF_NAME}___" || echo "___Nothing to commit___" - git 
push origin gh-pages --force + # artificial sleep to publish gh-pages + - sleep 300 + after_script: + - rm -rf .git/ ./* + +publish-subsystem-benchmarks: + stage: publish + variables: + CI_IMAGE: "paritytech/tools:latest" + extends: + - .kubernetes-env + - .publish-gh-pages-refs + needs: + - job: subsystem-regression-tests + artifacts: true + - job: publish-rustdoc + artifacts: false + script: + # setup ssh + - eval $(ssh-agent) + - ssh-add - <<< ${GITHUB_SSH_PRIV_KEY} + - mkdir ~/.ssh && touch ~/.ssh/known_hosts + - ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts + # Set git config + - rm -rf .git/config + - git config user.email "devops-team@parity.io" + - git config user.name "${GITHUB_USER}" + - git config remote.origin.url "git@github.com:/paritytech/${CI_PROJECT_NAME}.git" + - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" + - git fetch origin gh-pages + # Push result to github + - git checkout gh-pages --force + - mkdir -p bench/gitlab/ || echo "Directory exists" + - rm -rf bench/gitlab/*.json || echo "No json files" + - cp -r charts/*.json bench/gitlab/ + - git add bench/gitlab/ + - git commit -m "Add json files with benchmark results for ${CI_COMMIT_REF_NAME}" + - git push origin gh-pages + # artificial sleep to publish gh-pages + - sleep 300 + allow_failure: true after_script: - rm -rf .git/ ./* +trigger_workflow: + stage: deploy + extends: + - .kubernetes-env + - .publish-gh-pages-refs + needs: + - job: publish-subsystem-benchmarks + artifacts: false + - job: subsystem-regression-tests + artifacts: true + script: + - echo "Triggering workflow" + - | + for benchmark in $(ls charts/*.json); do + export benchmark_name=$(basename $benchmark) + echo "Benchmark: $benchmark_name" + export benchmark_dir=$(echo $benchmark_name | sed 's/\.json//') + curl -q -X POST \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Authorization: token $GITHUB_TOKEN" \ + https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \ + -d '{"ref":"refs/heads/master","inputs":{"benchmark-data-dir-path":"'$benchmark_dir'","output-file-path":"'$benchmark_name'"}}' + sleep 300 + done + allow_failure: true + # note: images are used not only in zombienet but also in rococo, wococo and versi .build-push-image: image: $BUILDAH_IMAGE diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index d244316000aaf152810c396e37e470694be506cc..48c84b472b4354fe7eeb12bbf2880c276e90a477 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -25,7 +25,6 @@ test-linux-stable: # "upgrade_version_checks_should_work" is currently failing - | time cargo nextest run \ - --filter-expr 'not deps(/polkadot-subsystem-bench/)' \ --workspace \ --locked \ --release \ @@ -70,7 +69,7 @@ test-linux-stable-runtime-benchmarks: # but still want to have debug assertions.
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - - time cargo nextest run --filter-expr 'not deps(/polkadot-subsystem-bench/)' --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + - time cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet # can be used to run all tests # test-linux-stable-all: @@ -497,12 +496,19 @@ test-syscalls: subsystem-regression-tests: stage: test + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: always + expire_in: 1 days + paths: + - charts/ extends: - .docker-env - .common-refs - .run-immediately script: - cargo bench --profile=testnet -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks + - cargo bench --profile=testnet -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks tags: - benchmark allow_failure: true diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 8d308714fab3cb44827bb202fce0939f52f730ad..52948e1eb719d9f8669523d9762f5662fd1b6e96 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,8 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.95" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.99" + PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" include: # substrate tests diff --git a/.gitlab/rust-features.sh b/.gitlab/rust-features.sh index c0ac192a6ec69ba16abb3bad2ec49de7e9cebb61..c3ec61ab8714768c9a49f2eb2e1e544706a1d875 100755 --- a/.gitlab/rust-features.sh +++ b/.gitlab/rust-features.sh @@ -15,7 +15,7 @@ # # The steps of this script: # 1. Check that all required dependencies are installed. -# 2. Check that all rules are fullfilled for the whole workspace. If not: +# 2. Check that all rules are fulfilled for the whole workspace. If not: # 4. Check all crates to find the offending ones. # 5. Print all offending crates and exit with code 1. 
# diff --git a/Cargo.lock b/Cargo.lock index 074b657e767b1cc121a471f5aee662fc2290ebda..42524fc7765babca8787722d48c70b0a1d06e376 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -275,9 +275,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.2" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -867,6 +867,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -990,6 +991,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -1239,9 +1241,9 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", @@ -1393,7 +1395,7 @@ name = "binary-merkle-tree" version = "13.0.0" dependencies = [ "array-bytes 6.1.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "hash-db", "log", "sp-core", @@ -2010,6 +2012,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2148,6 +2151,7 @@ dependencies = [ "pallet-message-queue", "pallet-xcm", "parachains-common", + "parity-scale-codec", "rococo-westend-system-emulated-network", "sp-runtime", "staging-xcm", @@ -2180,6 +2184,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2623,9 +2628,9 @@ dependencies = [ [[package]] name = "clap-num" -version = "1.1.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e063d263364859dc54fb064cedb7c122740cd4733644b14b176c097f51e8ab7" +checksum = "488557e97528174edaa2ee268b23a809e0c598213a4bbcb4f34575a46fda147e" dependencies = [ "num-traits", ] @@ -2738,6 +2743,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -2843,10 +2849,11 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "2.1.0" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" dependencies = [ + "is-terminal", "lazy_static", "windows-sys 0.48.0", ] @@ -2992,6 +2999,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", 
"cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3086,6 +3094,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3150,6 +3159,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -3802,6 +3812,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-io", "sp-runtime", "sp-transaction-pool", ] @@ -4047,7 +4058,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", - "docify 0.2.7", + "docify", "frame-support", "frame-system", "log", @@ -4320,7 +4331,6 @@ dependencies = [ "frame-system-rpc-runtime-api", "futures", "jsonrpsee", - "pallet-im-online", "pallet-timestamp", "pallet-transaction-payment", "parachains-common", @@ -4570,6 +4580,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive-syn-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -4717,47 +4738,21 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1b04e6ef3d21119d3eb7b032bca17f99fe041e9c072f30f32cc0e1a2b1f3c4" -dependencies = [ - "docify_macros 0.1.16", -] - -[[package]] -name = "docify" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc4fd38aaa9fb98ac70794c82a00360d1e165a87fbf96a8a91f9dfc602aaee2" -dependencies = [ - "docify_macros 0.2.7", -] - -[[package]] -name = "docify_macros" -version = "0.1.16" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b5610df7f2acf89a1bb5d1a66ae56b1c7fcdcfe3948856fb3ace3f644d70eb7" +checksum = "43a2f138ad521dc4a2ced1a4576148a6a610b4c5923933b062a263130a6802ce" dependencies = [ - "common-path", - "derive-syn-parse", - "lazy_static", - "proc-macro2", - "quote", - "regex", - "syn 2.0.53", - "termcolor", - "walkdir", + "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63fa215f3a0d40fb2a221b3aa90d8e1fbb8379785a990cb60d62ac71ebdc6460" +checksum = "1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" dependencies = [ "common-path", - "derive-syn-parse", + "derive-syn-parse 0.2.0", "once_cell", "proc-macro2", "quote", @@ -5005,10 +5000,10 @@ dependencies = [ ] [[package]] -name = "env_logger" -version = "0.8.4" +name = "env_filter" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" dependencies = [ "log", "regex", @@ -5016,15 +5011,12 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ - "atty", - "humantime", "log", "regex", - "termcolor", ] [[package]] @@ -5040,6 +5032,19 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + [[package]] name = "environmental" version = "1.1.4" @@ -5453,7 +5458,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" name = "frame" version = "0.0.1-dev" dependencies = [ - "docify 0.2.7", + "docify", "frame-executive", "frame-support", "frame-system", @@ -5682,7 +5687,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "docify 0.2.7", + "docify", "environmental", "frame-metadata", "frame-support-procedural", @@ -5725,7 +5730,7 @@ version = "23.0.0" dependencies = [ "Inflector", "cfg-expr", - "derive-syn-parse", + "derive-syn-parse 0.2.0", "expander 2.0.0", "frame-support-procedural-tools", "itertools 0.10.5", @@ -5826,7 +5831,7 @@ version = "28.0.0" dependencies = [ "cfg-if", "criterion 0.4.0", - "docify 0.2.7", + "docify", "frame-support", "log", "parity-scale-codec", @@ -6772,7 +6777,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.3", + "socket2 0.5.6", "widestring", "windows-sys 0.48.0", "winreg", @@ -6854,9 +6859,9 @@ checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" [[package]] name = "jsonrpsee" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a95f7cc23d5fab0cdeeaf6bad8c8f5e7a3aa7f0d211957ea78232b327ab27b0" +checksum = "87f3ae45a64cfc0882934f963be9431b2a165d667f53140358181f262aca0702" dependencies = [ "jsonrpsee-core", "jsonrpsee-http-client", @@ -6870,9 +6875,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b1736cfa3845fd9f8f43751f2b8e0e83f7b6081e754502f7d63b6587692cc83" +checksum = "455fc882e56f58228df2aee36b88a1340eafd707c76af2fa68cf94b37d461131" dependencies = [ "futures-util", "http", @@ -6891,9 +6896,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82030d038658974732103e623ba2e0abec03bbbe175b39c0a2fafbada60c5868" +checksum = "b75568f4f9696e3a47426e1985b548e1a9fcb13372a5e320372acaf04aca30d1" dependencies = [ "anyhow", "async-lock 3.3.0", @@ -6917,9 +6922,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a06ef0de060005fddf772d54597bb6a8b0413da47dcffd304b0306147b9678" +checksum = "9e7a95e346f55df84fb167b7e06470e196e7d5b9488a21d69c5d9732043ba7ba" dependencies = [ "async-trait", "hyper", @@ -6937,22 +6942,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"69fc56131589f82e57805f7338b87023db4aafef813555708b159787e34ad6bc" +checksum = "30ca066e73dd70294aebc5c2675d8ffae43be944af027c857ce0d4c51785f014" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.53", ] [[package]] name = "jsonrpsee-server" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d85be77fe5b2a94589e3164fb780017f7aff7d646b49278c0d0346af16975c8e" +checksum = "0e29c1bd1f9bba83c864977c73404e505f74f730fa0db89dd490ec174e36d7f0" dependencies = [ "futures-util", "http", @@ -6974,9 +6979,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a48fdc1202eafc51c63e00406575e59493284ace8b8b61aa16f3a6db5d64f1a" +checksum = "3467fd35feeee179f71ab294516bdf3a81139e7aeebdd860e46897c12e1a3368" dependencies = [ "anyhow", "beef", @@ -6987,9 +6992,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.0" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ce25d70a8e4d3cc574bbc3cad0137c326ad64b194793d5e7bbdd3fa4504181" +checksum = "68ca71e74983f624c0cb67828e480a981586074da8ad3a2f214c6a3f884edab9" dependencies = [ "http", "jsonrpsee-client-transport", @@ -7943,7 +7948,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d" dependencies = [ "const-random", - "derive-syn-parse", + "derive-syn-parse 0.1.5", "macro_magic_core_macros", "proc-macro2", "quote", @@ -9215,7 +9220,7 @@ name = "pallet-bags-list" version = "27.0.0" dependencies = [ "aquamarine 0.5.0", - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -9263,7 +9268,7 @@ dependencies = [ name = "pallet-balances" version = "28.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -9534,7 +9539,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "env_logger 0.9.3", + "env_logger 0.11.3", "environmental", "frame-benchmarking", "frame-support", @@ -9552,6 +9557,7 @@ dependencies = [ "pallet-timestamp", "pallet-utility", "parity-scale-codec", + "paste", "pretty_assertions", "rand", "rand_pcg", @@ -9780,7 +9786,7 @@ dependencies = [ "sp-runtime", "sp-std 14.0.0", "sp-tracing 16.0.0", - "strum 0.24.1", + "strum 0.26.2", ] [[package]] @@ -9881,7 +9887,7 @@ dependencies = [ name = "pallet-example-single-block-migrations" version = "0.0.1" dependencies = [ - "docify 0.2.7", + "docify", "frame-executive", "frame-support", "frame-system", @@ -9947,7 +9953,7 @@ dependencies = [ name = "pallet-fast-unstake" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -10148,7 +10154,7 @@ dependencies = [ name = "pallet-migrations" version = "1.0.0" dependencies = [ - "docify 0.1.16", + "docify", "frame-benchmarking", "frame-executive", "frame-support", @@ -10201,7 +10207,7 @@ name = "pallet-mmr" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "frame-benchmarking", "frame-support", "frame-system", @@ -10452,7 +10458,7 @@ dependencies = [ name = "pallet-paged-list" version = "0.6.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ 
-10494,7 +10500,7 @@ dependencies = [ name = "pallet-parameters" version = "0.0.1" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10655,7 +10661,7 @@ dependencies = [ name = "pallet-safe-mode" version = "9.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10712,7 +10718,7 @@ dependencies = [ name = "pallet-scheduler" version = "29.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10925,7 +10931,7 @@ dependencies = [ name = "pallet-sudo" version = "28.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -10955,7 +10961,7 @@ dependencies = [ name = "pallet-timestamp" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -11059,7 +11065,7 @@ dependencies = [ name = "pallet-treasury" version = "27.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -11079,7 +11085,7 @@ dependencies = [ name = "pallet-tx-pause" version = "9.0.0" dependencies = [ - "docify 0.2.7", + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -11188,6 +11194,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -11765,6 +11772,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "enumflags2", "frame-benchmarking", @@ -11864,6 +11872,7 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "enumflags2", "frame-benchmarking", @@ -12092,7 +12101,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "itertools 0.10.5", @@ -12122,7 +12131,7 @@ dependencies = [ "always-assert", "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "log", @@ -12178,7 +12187,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-timer", @@ -12240,7 +12249,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-timer", @@ -12412,7 +12421,7 @@ dependencies = [ "async-trait", "bitvec", "derive_more", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "itertools 0.10.5", @@ -12454,7 +12463,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "kvdb", @@ -12896,7 +12905,7 @@ dependencies = [ "rand_chacha 0.3.1", "sc-authority-discovery", "sc-network", - "strum 0.24.1", + "strum 0.26.2", "thiserror", "tracing-gum", ] @@ -12988,7 +12997,7 @@ dependencies = [ "assert_matches", "async-trait", "derive_more", - "env_logger 0.9.3", + "env_logger 0.11.3", "fatality", "futures", "futures-channel", @@ -13367,7 +13376,7 @@ version = "0.0.1" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", - "docify 0.2.7", + "docify", "frame", "frame-executive", "frame-support", @@ -13386,11 +13395,14 @@ dependencies = [ 
"pallet-example-single-block-migrations", "pallet-examples", "pallet-multisig", + "pallet-nfts", + "pallet-preimage", "pallet-proxy", "pallet-referenda", "pallet-scheduler", "pallet-timestamp", "pallet-transaction-payment", + "pallet-uniques", "pallet-utility", "parity-scale-codec", "sc-cli", @@ -13429,7 +13441,7 @@ dependencies = [ "assert_matches", "async-trait", "bitvec", - "env_logger 0.9.3", + "env_logger 0.11.3", "frame-benchmarking", "frame-benchmarking-cli", "frame-support", @@ -13443,7 +13455,6 @@ dependencies = [ "log", "mmr-gadget", "pallet-babe", - "pallet-im-online", "pallet-staking", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", @@ -13540,12 +13551,14 @@ dependencies = [ "sp-transaction-pool", "sp-version", "sp-weights", + "staging-xcm", "substrate-prometheus-endpoint", "tempfile", "thiserror", "tracing-gum", "westend-runtime", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -13604,7 +13617,7 @@ dependencies = [ "clap-num", "color-eyre", "colored", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "futures-timer", "hex", @@ -13644,6 +13657,7 @@ dependencies = [ "sc-service", "schnorrkel 0.11.4", "serde", + "serde_json", "serde_yaml", "sha1", "sp-application-crypto", @@ -15044,7 +15058,6 @@ dependencies = [ "pallet-elections-phragmen", "pallet-grandpa", "pallet-identity", - "pallet-im-online", "pallet-indices", "pallet-membership", "pallet-message-queue", @@ -15115,6 +15128,7 @@ dependencies = [ "substrate-wasm-builder", "tiny-keccak", "tokio", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -15624,7 +15638,7 @@ name = "sc-chain-spec" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "docify 0.2.7", + "docify", "log", "memmap2 0.9.3", "parity-scale-codec", @@ -16137,7 +16151,7 @@ dependencies = [ "array-bytes 6.1.0", "assert_matches", "criterion 0.4.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "num_cpus", "parity-scale-codec", "parking_lot 0.12.1", @@ -16573,7 +16587,7 @@ name = "sc-rpc" version = "29.0.0" dependencies = [ "assert_matches", - "env_logger 0.9.3", + "env_logger 0.11.3", "futures", "jsonrpsee", "log", @@ -16815,7 +16829,7 @@ dependencies = [ name = "sc-statement-store" version = "10.0.0" dependencies = [ - "env_logger 0.9.3", + "env_logger 0.11.3", "log", "parity-db", "parking_lot 0.12.1", @@ -17007,9 +17021,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ef2175c2907e7c8bc0a9c3f86aeb5ec1f3b275300ad58a44d0c3ae379a5e52e" +checksum = "788745a868b0e751750388f4e6546eb921ef714a4317fa6954f7cde114eb2eb7" dependencies = [ "bitvec", "cfg-if", @@ -17021,9 +17035,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b8eb8fd61c5cdd3390d9b2132300a7e7618955b98b8416f118c1b4e144f" +checksum = "7dc2f4e8bc344b9fc3d5f74f72c2e55bfc38d28dc2ebc69c194a3df424e4d9ac" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", @@ -17898,7 +17912,7 @@ name = "snowbridge-outbound-queue-merkle-tree" version = "0.3.0" dependencies = [ "array-bytes 4.2.0", - "env_logger 0.9.3", + "env_logger 0.11.3", "hex", "hex-literal", "parity-scale-codec", @@ -18208,12 +18222,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -18401,7 +18415,7 @@ name = "sp-arithmetic" version = "23.0.0" dependencies = [ "criterion 0.4.0", - "docify 0.2.7", + "docify", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -18544,7 +18558,7 @@ dependencies = [ "sp-keystore", "sp-mmr-primitives", "sp-runtime", - "strum 0.24.1", + "strum 0.26.2", "w3f-bls", ] @@ -18835,7 +18849,7 @@ version = "31.0.0" dependencies = [ "sp-core", "sp-runtime", - "strum 0.24.1", + "strum 0.26.2", ] [[package]] @@ -18951,7 +18965,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ - "docify 0.2.7", + "docify", "either", "hash256-std-hasher", "impl-trait-for-tuples", @@ -19721,6 +19735,15 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +[[package]] +name = "strum" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +dependencies = [ + "strum_macros 0.26.2", +] + [[package]] name = "strum_macros" version = "0.24.3" @@ -19747,6 +19770,19 @@ dependencies = [ "syn 2.0.53", ] +[[package]] +name = "strum_macros" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.53", +] + [[package]] name = "subkey" version = "9.0.0" @@ -20012,7 +20048,7 @@ dependencies = [ "parity-wasm", "polkavm-linker", "sp-maybe-compressed-blob", - "strum 0.24.1", + "strum 0.26.2", "tempfile", "toml 0.8.8", "walkdir", @@ -20560,9 +20596,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -20572,16 +20608,16 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite 0.2.12", "signal-hook-registry", - "socket2 0.5.3", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", @@ -21894,7 +21930,6 @@ dependencies = [ "pallet-fast-unstake", "pallet-grandpa", "pallet-identity", - "pallet-im-online", "pallet-indices", "pallet-membership", "pallet-message-queue", @@ -21969,6 +22004,7 @@ dependencies = [ "tiny-keccak", "tokio", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -22442,6 +22478,20 @@ dependencies = [ "staging-xcm-executor", ] +[[package]] +name = "xcm-fee-payment-runtime-api" +version = "0.1.0" +dependencies = [ + "frame-support", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-runtime", + "sp-std 14.0.0", + "sp-weights", + "staging-xcm", +] + [[package]] name = 
"xcm-procedural" version = "7.0.0" diff --git a/Cargo.toml b/Cargo.toml index 01d6ef8e87bdaf4dcf0e4f73dfde59df76f500c4..e6162830375fd9e0053b5623d01196ceb2f3fb55 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,24 +10,24 @@ resolver = "2" members = [ "bridges/bin/runtime-common", + "bridges/chains/chain-asset-hub-rococo", + "bridges/chains/chain-asset-hub-westend", + "bridges/chains/chain-bridge-hub-cumulus", + "bridges/chains/chain-bridge-hub-kusama", + "bridges/chains/chain-bridge-hub-polkadot", + "bridges/chains/chain-bridge-hub-rococo", + "bridges/chains/chain-bridge-hub-westend", + "bridges/chains/chain-kusama", + "bridges/chains/chain-polkadot", + "bridges/chains/chain-polkadot-bulletin", + "bridges/chains/chain-rococo", + "bridges/chains/chain-westend", "bridges/modules/grandpa", "bridges/modules/messages", "bridges/modules/parachains", "bridges/modules/relayers", "bridges/modules/xcm-bridge-hub", "bridges/modules/xcm-bridge-hub-router", - "bridges/primitives/chain-asset-hub-rococo", - "bridges/primitives/chain-asset-hub-westend", - "bridges/primitives/chain-bridge-hub-cumulus", - "bridges/primitives/chain-bridge-hub-kusama", - "bridges/primitives/chain-bridge-hub-polkadot", - "bridges/primitives/chain-bridge-hub-rococo", - "bridges/primitives/chain-bridge-hub-westend", - "bridges/primitives/chain-kusama", - "bridges/primitives/chain-polkadot", - "bridges/primitives/chain-polkadot-bulletin", - "bridges/primitives/chain-rococo", - "bridges/primitives/chain-westend", "bridges/primitives/header-chain", "bridges/primitives/messages", "bridges/primitives/parachains", @@ -214,6 +214,7 @@ members = [ "polkadot/xcm/xcm-builder", "polkadot/xcm/xcm-executor", "polkadot/xcm/xcm-executor/integration-tests", + "polkadot/xcm/xcm-fee-payment-runtime-api", "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", diff --git a/bridges/README.md b/bridges/README.md index a2ce213d2541c346361eb28125a06e3079e1c269..8bfa39841f51e7824d0f1169540342c2bd88b664 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -38,10 +38,10 @@ cargo test --all ``` Also you can build the repo with [Parity CI Docker -image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci): +image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-unified): ```bash -docker pull paritytech/bridges-ci:production +docker pull paritytech/ci-unified:latest mkdir ~/cache chown 1000:1000 ~/cache #processes in the container runs as "nonroot" user with UID 1000 docker run --rm -it -w /shellhere/parity-bridges-common \ @@ -49,7 +49,7 @@ docker run --rm -it -w /shellhere/parity-bridges-common \ -v "$(pwd)":/shellhere/parity-bridges-common \ -e CARGO_HOME=/cache/cargo/ \ -e SCCACHE_DIR=/cache/sccache/ \ - -e CARGO_TARGET_DIR=/cache/target/ paritytech/bridges-ci:production cargo build --all + -e CARGO_TARGET_DIR=/cache/target/ paritytech/ci-unified:latest cargo build --all #artifacts can be found in ~/cache/target ``` diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index fac88b20ca57901d4116e147bd9363a41ff35e36..67b91a16a302d6214830241082b21c407b04c6d1 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -11,10 +11,10 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", 
default-features = false, features = ["derive"] } hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } static_assertions = { version = "1.1", optional = true } # Bridge dependencies diff --git a/bridges/bin/runtime-common/src/messages_api.rs b/bridges/bin/runtime-common/src/messages_api.rs index ccf1c754041ed84dc302f0660fdd5bde8dc8d533..7fbdeb366124778b36c77725be8ca8778020be1b 100644 --- a/bridges/bin/runtime-common/src/messages_api.rs +++ b/bridges/bin/runtime-common/src/messages_api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Helpers for implementing various message-related runtime API mthods. +//! Helpers for implementing various message-related runtime API methods. use bp_messages::{ InboundMessageDetails, LaneId, MessageNonce, MessagePayload, OutboundMessageDetails, diff --git a/bridges/bin/runtime-common/src/messages_xcm_extension.rs b/bridges/bin/runtime-common/src/messages_xcm_extension.rs index e3da6155f08a198d5469adbfc64e40213eddf8eb..46ed4da0d85481fcc7223740084945924f9c710f 100644 --- a/bridges/bin/runtime-common/src/messages_xcm_extension.rs +++ b/bridges/bin/runtime-common/src/messages_xcm_extension.rs @@ -248,7 +248,7 @@ impl LocalXcmQueueManager { sender_and_lane: &SenderAndLane, enqueued_messages: MessageNonce, ) { - // skip if we dont want to handle congestion + // skip if we don't want to handle congestion if !H::supports_congestion_detection() { return } diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index deee4524e8586d45c01f08126e84b9c619cfeba2..8c4cb2233e17c7ff0f6aa05f45483db07ef2b707 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -379,7 +379,7 @@ impl Chain for BridgedUnderlyingChain { impl ChainWithGrandpa for BridgedUnderlyingChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index c2737128e3422dd72ca3b49d66151268d719e0bc..5035553f508dfea94a0cb5ddf9b916dd7d9b4ea5 100644 --- a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -163,7 +163,7 @@ mod integrity_tests { { // just an estimation of extra transaction bytes that are added to every transaction // (including signature, signed extensions extra and etc + in our case it includes - // all call arguments extept the proof itself) + // all call arguments except the proof itself) let base_tx_size = 512; // let's say we are relaying similar small messages and for every message we add more trie // nodes to the proof (x0.5 because we expect some nodes to be reused) diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs index 8e901d72821fe1dfb749f4227c2875f1d807fccc..455392a0a277f3520cd7f58150f12e7420d36014 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ 
b/bridges/bin/runtime-common/src/refund_relayer_extension.rs @@ -1538,7 +1538,7 @@ mod tests { } #[test] - fn validate_boosts_priority_of_message_delivery_transactons() { + fn validate_boosts_priority_of_message_delivery_transactions() { run_test(|| { initialize_environment(100, 100, 100); @@ -1568,7 +1568,7 @@ mod tests { } #[test] - fn validate_does_not_boost_priority_of_message_delivery_transactons_with_too_many_messages() { + fn validate_does_not_boost_priority_of_message_delivery_transactions_with_too_many_messages() { run_test(|| { initialize_environment(100, 100, 100); diff --git a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml similarity index 69% rename from bridges/primitives/chain-asset-hub-rococo/Cargo.toml rename to bridges/chains/chain-asset-hub-rococo/Cargo.toml index 4dfa149e0ea9ab4e0ac1804844a0c128f15bd5bb..9a6419a5b4055be348f4f8813e3c1301f14f7142 100644 --- a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -5,19 +5,20 @@ version = "0.4.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } [features] default = ["std"] diff --git a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs b/bridges/chains/chain-asset-hub-rococo/src/lib.rs similarity index 100% rename from bridges/primitives/chain-asset-hub-rococo/src/lib.rs rename to bridges/chains/chain-asset-hub-rococo/src/lib.rs diff --git a/bridges/primitives/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml similarity index 69% rename from bridges/primitives/chain-asset-hub-westend/Cargo.toml rename to bridges/chains/chain-asset-hub-westend/Cargo.toml index c9bd437562b86a97cdf2807c18b4905e695d1a5e..1c08ee28e417cb50ce9ef9ded5f17163e1bb30d4 100644 --- a/bridges/primitives/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -5,19 +5,20 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies frame-support = { path = "../../../substrate/frame/support", default-features = false } # Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } +bp-xcm-bridge-hub-router = { path = 
"../../primitives/xcm-bridge-hub-router", default-features = false } [features] default = ["std"] diff --git a/bridges/primitives/chain-asset-hub-westend/src/lib.rs b/bridges/chains/chain-asset-hub-westend/src/lib.rs similarity index 100% rename from bridges/primitives/chain-asset-hub-westend/src/lib.rs rename to bridges/chains/chain-asset-hub-westend/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml similarity index 78% rename from bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml rename to bridges/chains/chain-bridge-hub-cumulus/Cargo.toml index d35eefa1c45c3f7cf479dcea2ee4da87dbc31627..4b900002a4d81abb9d7364f555a150a2af6c839c 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml @@ -5,6 +5,7 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -12,9 +13,9 @@ workspace = true [dependencies] # Bridge Dependencies -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs rename to bridges/chains/chain-bridge-hub-cumulus/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml similarity index 83% rename from bridges/primitives/chain-bridge-hub-kusama/Cargo.toml rename to bridges/chains/chain-bridge-hub-kusama/Cargo.toml index 8d71b3f5eb7646a81af55c0030814c604ead82ef..ff6dd8849abe3897f1c3eb3cb1de8b7d89af5ca7 100644 --- a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,8 +14,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-kusama/src/lib.rs b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-kusama/src/lib.rs rename to bridges/chains/chain-bridge-hub-kusama/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml similarity index 83% rename from bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml rename to bridges/chains/chain-bridge-hub-polkadot/Cargo.toml 
index 4e89e8a5c9a173bc9a084af9e7c609a6bae287a8..da8b8a82fa702eeab719335fa9968b78ee965163 100644 --- a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -14,8 +15,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-polkadot/src/lib.rs rename to bridges/chains/chain-bridge-hub-polkadot/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml similarity index 83% rename from bridges/primitives/chain-bridge-hub-rococo/Cargo.toml rename to bridges/chains/chain-bridge-hub-rococo/Cargo.toml index 1643d934a982ec0795fc370b221dc35d36d3b492..f7672df012f2fc2a21cfc987468427a3222317ea 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml @@ -5,6 +5,7 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,8 +14,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-rococo/src/lib.rs rename to bridges/chains/chain-bridge-hub-rococo/src/lib.rs diff --git a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml b/bridges/chains/chain-bridge-hub-westend/Cargo.toml similarity index 83% rename from bridges/primitives/chain-bridge-hub-westend/Cargo.toml rename to bridges/chains/chain-bridge-hub-westend/Cargo.toml index 32a7850c5392fd892ec40e9968fa365ec59cbba3..ec74c4b947d693dba92d4da5051526e49349e0a5 100644 --- a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-westend/Cargo.toml @@ -5,6 +5,7 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -14,8 +15,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = 
"../../primitives/runtime", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-bridge-hub-westend/src/lib.rs b/bridges/chains/chain-bridge-hub-westend/src/lib.rs similarity index 100% rename from bridges/primitives/chain-bridge-hub-westend/src/lib.rs rename to bridges/chains/chain-bridge-hub-westend/src/lib.rs diff --git a/bridges/primitives/chain-kusama/Cargo.toml b/bridges/chains/chain-kusama/Cargo.toml similarity index 71% rename from bridges/primitives/chain-kusama/Cargo.toml rename to bridges/chains/chain-kusama/Cargo.toml index 0660f34602389d1cf9e1ec88b57d1a9aeb9a3830..66061ff2793cbdd3419fa8894ab78e37486102ea 100644 --- a/bridges/primitives/chain-kusama/Cargo.toml +++ b/bridges/chains/chain-kusama/Cargo.toml @@ -5,6 +5,7 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,9 +14,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs similarity index 95% rename from bridges/primitives/chain-kusama/src/lib.rs rename to bridges/chains/chain-kusama/src/lib.rs index e3b4d0520f61c858b54d78dfa4a45f57bac411fb..a81004afe8127b556211d0207d2bc1f9ecc02955 100644 --- a/bridges/primitives/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -53,8 +53,8 @@ impl Chain for Kusama { impl ChainWithGrandpa for Kusama { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_KUSAMA_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml b/bridges/chains/chain-polkadot-bulletin/Cargo.toml similarity index 68% rename from bridges/primitives/chain-polkadot-bulletin/Cargo.toml rename to bridges/chains/chain-polkadot-bulletin/Cargo.toml index 15c824fcbdb31c2c84f63bd56d4d0b3f90efc5b1..2db16a00e92492e3a167458343a88a24c2186748 100644 --- a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -5,20 +5,21 @@ version = "0.4.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, 
features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-messages = { path = "../messages", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs similarity index 96% rename from bridges/primitives/chain-polkadot-bulletin/src/lib.rs rename to bridges/chains/chain-polkadot-bulletin/src/lib.rs index f2eebf9312470a42e1d3a1c7d67ab8b7a38af189..f3d300567f2b4f92cec272e0929a3c53d718c823 100644 --- a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -43,7 +43,7 @@ use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidi pub use bp_polkadot_core::{ AccountAddress, AccountId, Balance, Block, BlockNumber, Hash, Hasher, Header, Nonce, Signature, SignedBlock, UncheckedExtrinsic, AVERAGE_HEADER_SIZE, EXTRA_STORAGE_PROOF_SIZE, - MAX_MANDATORY_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + MAX_MANDATORY_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, }; /// Maximal number of GRANDPA authorities at Polkadot Bulletin chain. @@ -62,7 +62,7 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(90); // Re following constants - we are using the same values at Cumulus parachains. They are limited // by the maximal transaction weight/size. Since block limits at Bulletin Chain are larger than -// at the Cumulus Bridgeg Hubs, we could reuse the same values. +// at the Cumulus Bridge Hubs, we could reuse the same values. /// Maximal number of unrewarded relayer entries at inbound lane for Cumulus-based parachains. 
pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; @@ -207,8 +207,8 @@ impl Chain for PolkadotBulletin { impl ChainWithGrandpa for PolkadotBulletin { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_BULLETIN_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/primitives/chain-polkadot/Cargo.toml b/bridges/chains/chain-polkadot/Cargo.toml similarity index 71% rename from bridges/primitives/chain-polkadot/Cargo.toml rename to bridges/chains/chain-polkadot/Cargo.toml index 6421b7f40106e404eeba04be72f5448fd5f65159..c700935f3083b5f287277c7d9975be53352b2506 100644 --- a/bridges/primitives/chain-polkadot/Cargo.toml +++ b/bridges/chains/chain-polkadot/Cargo.toml @@ -5,6 +5,7 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,9 +14,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs similarity index 95% rename from bridges/primitives/chain-polkadot/src/lib.rs rename to bridges/chains/chain-polkadot/src/lib.rs index fc5e10308a8e33463a74c041f157daaef09cc9c8..00d35783a9b61844bab7701fdb60711125447ca3 100644 --- a/bridges/primitives/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -55,8 +55,8 @@ impl Chain for Polkadot { impl ChainWithGrandpa for Polkadot { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/primitives/chain-rococo/Cargo.toml b/bridges/chains/chain-rococo/Cargo.toml similarity index 71% rename from bridges/primitives/chain-rococo/Cargo.toml rename to bridges/chains/chain-rococo/Cargo.toml index de373f0ae64b8d9a8124dbcdfcca3c2bf05e2787..5a5613bb376a5a4f75c773b3350993262149f973 100644 --- a/bridges/primitives/chain-rococo/Cargo.toml +++ b/bridges/chains/chain-rococo/Cargo.toml @@ -5,6 +5,7 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,9 +14,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", 
default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs similarity index 95% rename from bridges/primitives/chain-rococo/src/lib.rs rename to bridges/chains/chain-rococo/src/lib.rs index f1b256f0f090f048cc8db3a16c112ed8b938f6ce..2385dd2cbb250181ce5f46aef9f1e76f8fd010d2 100644 --- a/bridges/primitives/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -53,8 +53,8 @@ impl Chain for Rococo { impl ChainWithGrandpa for Rococo { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_ROCOCO_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } diff --git a/bridges/primitives/chain-westend/Cargo.toml b/bridges/chains/chain-westend/Cargo.toml similarity index 71% rename from bridges/primitives/chain-westend/Cargo.toml rename to bridges/chains/chain-westend/Cargo.toml index e55a8d649a88b88d84f4f3547c23709cef67d872..10b06d76507ef95bbff00f5560b705ecee1ec4ce 100644 --- a/bridges/primitives/chain-westend/Cargo.toml +++ b/bridges/chains/chain-westend/Cargo.toml @@ -5,6 +5,7 @@ version = "0.3.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -13,9 +14,9 @@ workspace = true # Bridge Dependencies -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } +bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Based Dependencies diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs similarity index 95% rename from bridges/primitives/chain-westend/src/lib.rs rename to bridges/chains/chain-westend/src/lib.rs index f03fd2160a700eb3817a6feb629e9d366cc366aa..b344b7f4bf93392c08502446513a9ae39296b512 100644 --- a/bridges/primitives/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -53,8 +53,8 @@ impl Chain for Westend { impl ChainWithGrandpa for Westend { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WESTEND_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = 
AVERAGE_HEADER_SIZE; }
diff --git a/bridges/docs/bridge-relayers-claim-rewards.png b/bridges/docs/bridge-relayers-claim-rewards.png
new file mode 100644
index 0000000000000000000000000000000000000000..d56b8dd871e8445e7cab49517123b0092ce09137
Binary files /dev/null and b/bridges/docs/bridge-relayers-claim-rewards.png differ
diff --git a/bridges/docs/bridge-relayers-deregister.png b/bridges/docs/bridge-relayers-deregister.png
new file mode 100644
index 0000000000000000000000000000000000000000..e7706cee78916d7e2bbcfd7ee4a1a046a0450f87
Binary files /dev/null and b/bridges/docs/bridge-relayers-deregister.png differ
diff --git a/bridges/docs/bridge-relayers-register.png b/bridges/docs/bridge-relayers-register.png
new file mode 100644
index 0000000000000000000000000000000000000000..e9e3e1b5ac87c5c9d31477c696912fcbc93b0c78
Binary files /dev/null and b/bridges/docs/bridge-relayers-register.png differ
diff --git a/bridges/docs/running-relayer.md b/bridges/docs/running-relayer.md
new file mode 100644
index 0000000000000000000000000000000000000000..710810a476e4df5e4b80fde31f9576be5ad26391
--- /dev/null
+++ b/bridges/docs/running-relayer.md
@@ -0,0 +1,343 @@
+# Running your own bridge relayer
+
+:warning: :construction: Please read the [Disclaimer](#disclaimer) section first :construction: :warning:
+
+## Disclaimer
+
+There are several things you should know before running your own relayer:
+
+- the initial bridge version (we call it bridges v1) supports any number of relayers, but **there's no guaranteed
+compensation** for running a relayer and/or submitting valid bridge transactions. Most probably you'll end up
+spending more funds than you receive from rewards - please accept this fact;
+
+- even if your relayer has managed to submit a valid bridge transaction that has been included into the bridge
+hub block, there's no guarantee that you will be able to claim your compensation for that transaction. That's
+because compensations are paid from an account controlled by relay chain governance, and it may lack the funds
+to compensate your useful actions. We'll be working on a proper process to resupply it on time, but we can't
+provide any guarantee until that process is well established.
+
+## A Brief Introduction to Relayers and our Compensation Scheme
+
+Omitting details, a relayer is an offchain process that is connected to both bridged chains. It looks at the
+outbound bridge messages queue and submits message delivery transactions to the target chain. There are a lot
+of details behind that simple phrase - you can find more info in the
+[High-Level Bridge Overview](./high-level-overview.md) document.
+
+The reward that is paid to the relayer has two parts. The first part is static and is controlled by the governance.
+It is rather small initially - e.g. you need to deliver `10_000` Kusama -> Polkadot messages to gain a single
+KSM token.
+
+The other reward part is dynamic. To deliver an XCM message from one BridgeHub to another, we'll need to
+submit two transactions on different chains. Every transaction has its cost, which is:
+
+- dynamic, because e.g. message size can change and/or the fee factor of the target chain may change;
+
+- quite large, because those transactions are quite heavy (mostly in terms of size, not weight).
+
+We compensate the cost of **valid**, **minimal** and **useful** bridge-related transactions to the
+relayer that has submitted such a transaction. Valid here means that the transaction doesn't fail. Minimal
+means that all data within the transaction call is actually required for the transaction to succeed. Useful
+means that all data supplied in the transaction is new and yet unknown to the target chain.
+
+We have implemented a relayer that is able to craft such transactions. The rest of this document contains
+detailed information on how to deploy this software on your own node.
+
+## Relayers Concurrency
+
+As said above, we do not compensate the cost of transactions that are not **useful**. For
+example, if message `100` has already been delivered from Kusama Bridge Hub to Polkadot Bridge Hub, then another
+transaction that delivers the same message `100` won't be **useful**. Hence, there's no compensation for the
+relayer that has submitted that second transaction.
+
+But what if there are several relayers running? They notice the same queued message `100` and
+simultaneously submit identical message delivery transactions. You may expect that there'll be one lucky
+relayer whose transaction wins the "race" and receives the compensation and reward, and
+several other relayers losing some funds on their unuseful transactions.
+
+Actually, we have a solution that invalidates the transactions of "unlucky" relayers before they are
+included into the block. So at least you may be sure that you won't waste your funds on duplicate transactions.
+
+<details>
+<summary>Some details?</summary>
+
+All **unuseful** transactions are rejected by our
+[transaction extension](https://github.com/paritytech/polkadot-sdk/blob/master/bridges/bin/runtime-common/src/refund_relayer_extension.rs),
+which also handles transaction fee compensations. You may find more info on unuseful (aka obsolete) transactions
+by lurking in the code.
+
+We also have a WiP prototype of a relayers coordination protocol, where relayers will get some guarantee
+that their transactions will be prioritized over other relayers' transactions at their assigned slots.
+That is planned for a future version of the bridge and the progress is
+[tracked here](https://github.com/paritytech/parity-bridges-common/issues/2486).
+
+</details>
+
+## Prerequisites
+
+Let's focus on the bridge between Polkadot and Kusama Bridge Hubs. Let's also assume that we want to start
+a relayer that "serves" an initial lane [`0x00000001`](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-kusama/src/bridge_to_polkadot_config.rs#L54).
+
+<details>
+<summary>Lane?</summary>
+
+Think of a lane as a queue of messages that need to be delivered to the other/bridged chain. The lane is
+bidirectional, meaning that there are four "endpoints". Two "outbound" endpoints (one at each chain) contain
+messages that need to be delivered to the bridged chain. Two "inbound" endpoints accept messages from the bridged
+chain and also remember the relayer who has delivered the message(s), to reward it later.
+
+</details>
+
+The same steps may be performed for other lanes and bridges as well - you'll just need to change several parameters.
+
+So to start your relayer instance, you'll need to prepare:
+
+- an address of the ws/wss RPC endpoint of the Kusama relay chain;
+
+- an address of the ws/wss RPC endpoint of the Polkadot relay chain;
+
+- an address of the ws/wss RPC endpoint of the Kusama Bridge Hub chain;
+
+- an address of the ws/wss RPC endpoint of the Polkadot Bridge Hub chain;
+
+- an account on Kusama Bridge Hub;
+
+- an account on Polkadot Bridge Hub.
+
+For RPC endpoints, you could start your own nodes, or use some public community nodes. The nodes don't need to be
+archive nodes, nor do they need to provide access to insecure RPC calls.
+
+To create an account on the Bridge Hubs, you could use the XCM teleport functionality. E.g. if you have an account on
+the relay chain, you could use the `teleportAssets` call of `xcmPallet` and send the asset
+`V3 { id: Concrete(0, Here), Fungible: <amount> }` to the beneficiary `V3(0, X1(AccountId32(<account>)))`
+on the destination `V3(0, X1(Parachain(1002)))`. To estimate the amounts you need, please refer to the [Costs](#costs)
+section of the document.
+
+## Registering your Relayer Account (Optional, But Please Read)
+
+Bridge transactions are quite heavy and expensive. We want to minimize the block space that can be occupied by
+invalid bridge transactions and prioritize valid transactions over invalid ones. That is achieved by **optional**
+relayer registration. Transactions signed by relayers with an active registration gain a huge priority boost.
+In exchange, such relayers may be slashed if they submit an **invalid** or **non-minimal** transaction.
+
+Transactions signed by relayers **without** an active registration, on the other hand, receive no priority
+boost. It means that if there is an active registered relayer, most likely all transactions from unregistered
+relayers will be counted as **unuseful**, not included into the block, and the unregistered relayer won't get any
+reward for its operations.
+
+Before registering, you should know several things about your funds:
+
+- to register, you need to hold a significant amount of funds on your relayer account. As of now, it is
+  [100 KSM](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-kusama/src/bridge_to_polkadot_config.rs#L71C14-L71C43)
+  for registration on the Kusama Bridge Hub and
+  [500 DOT](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-polkadot/src/bridge_to_kusama_config.rs#L71C14-L71C43)
+  for registration on the Polkadot Bridge Hub;
+
+- when you are registered, those funds are reserved on the relayer account and you can't transfer them.
+
+The registration itself has three states: active, inactive or expired. Initially, it is active, meaning that all
+your transactions that are **validated** on top of a block where it is active get the priority boost. The registration
+becomes expired when the block with the number you have specified during registration is "mined". It is the
+`validTill` parameter of the `register` call (see below). After that `validTill` block, you may unregister and get
+your reserved funds back. There's also an intermediate point between those blocks - it is `validTill - LEASE`,
+where `LEASE` is a chain constant, controlled by the governance. Initially it is set to `300` blocks.
+All your transactions that are **validated** between the `validTill - LEASE` and `validTill` blocks do not get the
+priority boost. Also, it is forbidden to specify a `validTill` such that `validTill - currentBlock` is less
+than the `LEASE`. A minimal sketch of this state logic follows, and a worked block-by-block example after it.
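+
+Here is a minimal shell sketch of that state logic (the function name is ours, for illustration only; the `300`
+block `LEASE` is the initial value mentioned above and may be changed by the governance):
+
+```sh
+# Sketch only: reproduces the active/inactive/expired logic described above.
+registration_state() {
+  local current_block=$1 valid_till=$2 lease=${3:-300}
+  if [ "$current_block" -ge "$valid_till" ]; then
+    echo "Expired"   # past validTill: you may deregister and unreserve funds
+  elif [ "$current_block" -ge $((valid_till - lease)) ]; then
+    echo "Inactive"  # within the last LEASE blocks: no priority boost
+  else
+    echo "Active"    # transactions validated here get the priority boost
+  fi
+}
+
+registration_state 100 1000   # -> Active
+registration_state 700 1000   # -> Inactive
+registration_state 1000 1000  # -> Expired
+```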
+<details>
+<summary>Example?</summary>
+
+| Bridge Hub Block | Registration State | Comment                                                |
+| ---------------- | ------------------ | ------------------------------------------------------ |
+| 100              | Active             | You have submitted a tx with the `register(1000)` call |
+| 101              | Active             | Your message delivery transactions are boosted         |
+| 102              | Active             | Your message delivery transactions are boosted         |
+| ...              | Active             | Your message delivery transactions are boosted         |
+| 700              | Inactive           | Your message delivery transactions are not boosted     |
+| 701              | Inactive           | Your message delivery transactions are not boosted     |
+| ...              | Inactive           | Your message delivery transactions are not boosted     |
+| 1000             | Expired            | You may submit a tx with the `deregister` call         |
+
+</details>
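+
+If you prefer the command line over the Polkadot JS Apps flow described next, the same `register` extrinsic
+could be submitted with the `@polkadot/api-cli` package. This is only a sketch: the seed is a placeholder and
+`1000` is the example `validTill` value from the table above:
+
+```sh
+# Sketch, assuming @polkadot/api-cli is installed: npm install -g @polkadot/api-cli
+# The seed is a placeholder - use your relayer account's secret instead.
+polkadot-js-api \
+  --ws wss://kusama-bridge-hub-rpc.polkadot.io \
+  --seed "//YourRelayerSeed" \
+  tx.bridgeRelayers.register 1000
+```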
+
+So once you have enough funds on your account and have selected the `validTill` parameter value, you
+could use the Polkadot JS Apps to submit an extrinsic. If you want the priority boost for your transactions
+on the Kusama Bridge Hub, open the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/extrinsics)
+and submit the `register` extrinsic from the `bridgeRelayers` pallet:
+
+![Register Extrinsic](./bridge-relayers-register.png)
+
+To deregister, submit the simple `deregister` extrinsic when the registration is expired:
+
+![Deregister Extrinsic](./bridge-relayers-deregister.png)
+
+At any time, you can prolong your registration by calling `register` with a larger `validTill`.
+
+## Costs
+
+Your relayer account (on both Bridge Hubs) must hold enough funds to be able to pay the costs of bridge
+transactions. If your relayer behaves correctly, those costs will be compensated and you will be
+able to claim them later.
+
+**IMPORTANT**: you may add a tip to your bridge transactions to boost their priority. But our
+compensation mechanism never refunds the transaction tip, so all tip tokens will be lost.
+
+<details>
+<summary>Types of bridge transactions</summary>
+
+There are two types of bridge transactions:
+
+- a message delivery transaction brings queued message(s) from one Bridge Hub to another. We record
+  the fact that this specific (your) relayer has delivered those messages;
+
+- a message confirmation transaction confirms that some messages have been delivered and also brings
+  back information on how many messages (your) relayer has delivered. We use this information later
+  to register delivery rewards on the source chain.
+
+Several messages/confirmations may be included in a single bridge transaction. Apart from this
+data, a bridge transaction may include finality and storage proofs, required to prove the authenticity of
+this data.
+
+</details>
+
+To deliver and get a reward for a single message, the relayer needs to submit two transactions. One
+at the source Bridge Hub and one at the target Bridge Hub. Below are the costs for Polkadot <> Kusama
+messages (as of today):
+
+- to deliver a single Polkadot -> Kusama message, you would need to pay around `0.06 KSM` at Kusama
+  Bridge Hub and around `1.62 DOT` at Polkadot Bridge Hub;
+
+- to deliver a single Kusama -> Polkadot message, you would need to pay around `1.70 DOT` at Polkadot
+  Bridge Hub and around `0.05 KSM` at Kusama Bridge Hub.
+
+Those values are not constants - they depend on call weights (which may change from release to release),
+on transaction sizes (which depend on message size and chain state) and on the congestion factor. In any
+case - it is your duty to make sure that the relayer has enough funds to pay transaction fees.
+
+## Claiming your Compensations and Rewards
+
+Hopefully you have successfully delivered some messages and now can claim your compensation and reward.
+This requires submitting several transactions. But first, let's check that you actually have something to
+claim. For that, let's check the state of the pallet that tracks all rewards.
+
+To check your rewards at the Kusama Bridge Hub, go to the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/chainstate)
+targeting Kusama Bridge Hub, select the `bridgeRelayers` pallet, choose the `relayerRewards` map and
+your relayer account. Then:
+
+- set the `laneId` to `0x00000001`;
+
+- set the `bridgedChainId` to `bhpd`;
+
+- check both variants of the `owner` field: `ThisChain` is used to pay for message delivery transactions
+  and `BridgedChain` is used to pay for message confirmation transactions.
+
+If the check shows that you have some rewards, you can craft the claim transaction with similar parameters.
+For that, go to the `Extrinsics` tab of the
+[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/extrinsics)
+and submit the following transaction (make sure to change `owner` before):
+
+![Claim Rewards Extrinsic](./bridge-relayers-claim-rewards.png)
+
+To claim rewards on the Polkadot Bridge Hub you can follow the same process. The only difference is that you
+need to set the value of the `bridgedChainId` to `bhks`.
+
+## Starting your Relayer
+
+### Starting your Rococo <> Westend Relayer
+
+You may find the relayer image reference in the
+[Releases](https://github.com/paritytech/parity-bridges-common/releases)
+of this repository. Make sure to check the supported (bundled) versions
+of the release there. For the Rococo <> Westend bridge, you may normally use the
+latest published release. The release notes always contain the docker
+image reference and the source files required to build the relayer manually.
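+
+For example, fetching a published image could look like this (both the image name and the tag below are
+placeholders - always take the exact reference from the release notes):
+
+```sh
+# Placeholder reference - substitute the image from the release notes.
+export DOCKER_IMAGE=docker.io/paritytech/substrate-relay:v1.2.1
+docker pull $DOCKER_IMAGE
+```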
+
+Once you have the docker image, update the variables and run the following script:
+
+```sh
+export DOCKER_IMAGE=
+
+export ROCOCO_HOST=
+export ROCOCO_PORT=
+# or set it to '--rococo-secure' if wss is used above
+export ROCOCO_IS_SECURE=
+export BRIDGE_HUB_ROCOCO_HOST=
+export BRIDGE_HUB_ROCOCO_PORT=
+# or set it to '--bridge-hub-rococo-secure' if wss is used above
+export BRIDGE_HUB_ROCOCO_IS_SECURE=
+export BRIDGE_HUB_ROCOCO_KEY_FILE=
+
+export WESTEND_HOST=
+export WESTEND_PORT=
+# or set it to '--westend-secure' if wss is used above
+export WESTEND_IS_SECURE=
+export BRIDGE_HUB_WESTEND_HOST=
+export BRIDGE_HUB_WESTEND_PORT=
+# or set it to '--bridge-hub-westend-secure' if wss is used above
+export BRIDGE_HUB_WESTEND_IS_SECURE=
+export BRIDGE_HUB_WESTEND_KEY_FILE=
+
+# you can get extended relay logs (e.g. for debugging issues) by passing the `-e RUST_LOG=bridge=trace`
+# argument to the `docker` binary
+docker run \
+  -v $BRIDGE_HUB_ROCOCO_KEY_FILE:/bhr.key \
+  -v $BRIDGE_HUB_WESTEND_KEY_FILE:/bhw.key \
+  $DOCKER_IMAGE \
+  relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \
+  --rococo-host $ROCOCO_HOST \
+  --rococo-port $ROCOCO_PORT \
+  $ROCOCO_IS_SECURE \
+  --rococo-version-mode Auto \
+  --bridge-hub-rococo-host $BRIDGE_HUB_ROCOCO_HOST \
+  --bridge-hub-rococo-port $BRIDGE_HUB_ROCOCO_PORT \
+  $BRIDGE_HUB_ROCOCO_IS_SECURE \
+  --bridge-hub-rococo-version-mode Auto \
+  --bridge-hub-rococo-signer-file /bhr.key \
+  --bridge-hub-rococo-transactions-mortality 16 \
+  --westend-host $WESTEND_HOST \
+  --westend-port $WESTEND_PORT \
+  $WESTEND_IS_SECURE \
+  --westend-version-mode Auto \
+  --bridge-hub-westend-host $BRIDGE_HUB_WESTEND_HOST \
+  --bridge-hub-westend-port $BRIDGE_HUB_WESTEND_PORT \
+  $BRIDGE_HUB_WESTEND_IS_SECURE \
+  --bridge-hub-westend-version-mode Auto \
+  --bridge-hub-westend-signer-file /bhw.key \
+  --bridge-hub-westend-transactions-mortality 16 \
+  --lane 00000002
+```
+
+### Starting your Polkadot <> Kusama Relayer
+
+*Work in progress, coming soon*
+
+### Watching your relayer state
+
+Our relayer provides some Prometheus metrics that you may convert into fancy Grafana dashboards
+and alerts. By default, metrics are exposed at port `9616`. To expose the endpoint on localhost, change
+the docker command by adding the following two lines:
+
+```sh
+docker run \
+  ..
+  -p 127.0.0.1:9616:9616 \ # tell Docker to bind container port 9616 to host port 9616
+                           # and listen for connections on the host's localhost interface
+  ..
+  $DOCKER_IMAGE \
+  relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \
+  --prometheus-host 0.0.0.0 \ # tell the `substrate-relay` binary to accept Prometheus endpoint
+                              # connections from everywhere
+  ..
+```
+
+You can find more info on configuring Prometheus and Grafana in the
+[Monitor your node](https://wiki.polkadot.network/docs/maintain-guides-how-to-monitor-your-node)
+guide from the Polkadot wiki.
+
+We have our own set of Grafana dashboards and alerts. You may use them for inspiration.
+Please find them in these folders:
+
+- for the Rococo <> Westend bridge: [rococo-westend](https://github.com/paritytech/parity-bridges-common/tree/master/deployments/bridges/rococo-westend).
+ +- for Polkadot <> Kusama bridge: *work in progress, coming soon* diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index dccd7b3bdca3533cda4fec82ed0266d0b221b7a7..0db1827211a05f715cd1aed0db93da0f52c9d67c 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -5,6 +5,7 @@ description = "Module implementing GRANDPA on-chain light client used for bridgi authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -12,10 +13,10 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/grandpa/README.md b/bridges/modules/grandpa/README.md index 992bd2cc47228249310c56747416b07be6e1e287..4a3099b8afc654bfced296aaa0ead4a5d113eb7f 100644 --- a/bridges/modules/grandpa/README.md +++ b/bridges/modules/grandpa/README.md @@ -10,7 +10,7 @@ It is used by the parachains light client (bridge parachains pallet) and by mess ## A Brief Introduction into GRANDPA Finality You can find detailed information on GRANDPA, by exploring its [repository](https://github.com/paritytech/finality-grandpa). -Here is the minimal reqiuired GRANDPA information to understand how pallet works. +Here is the minimal required GRANDPA information to understand how pallet works. Any Substrate chain may use different block authorship algorithms (like BABE or Aura) to determine block producers and generate blocks. This has nothing common with finality, though - the task of block authorship is to coordinate diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index e3c778b480baa51a8b9e5d04564ac54bc7a68a21..4a7ebb3cc8d42d7cb9d97d5c6990bb33658416bd 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -205,7 +205,7 @@ pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( // as an extra weight. 
let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); let extra_weight = - if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY { + if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY { T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) } else { Weight::zero() @@ -396,11 +396,11 @@ mod tests { let finality_target = test_header(1); let mut justification_params = JustificationGeneratorParams { header: finality_target.clone(), - ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, ..Default::default() }; - // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY` headers => no refund + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY` headers => no refund let justification = make_justification_for_header(justification_params.clone()); let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex { finality_target: Box::new(finality_target.clone()), @@ -409,7 +409,7 @@ mod tests { }); assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero()); - // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1` headers => full refund + // when there are `REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY + 1` headers => full refund justification_params.ancestors += 1; let justification = make_justification_for_header(justification_params); let call_weight = ::WeightInfo::submit_finality_proof( diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index ce2c47da954fa46efc4c70e9608864735fa16277..9e095651ef81da1e5418d7532ae56ae0fb8ef564 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -935,7 +935,7 @@ mod tests { } #[test] - fn succesfully_imports_header_with_valid_finality() { + fn successfully_imports_header_with_valid_finality() { run_test(|| { initialize_substrate_bridge(); @@ -1192,7 +1192,7 @@ mod tests { header.digest = change_log(0); let justification = make_justification_for_header(JustificationGeneratorParams { header: header.clone(), - ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1, + ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY + 1, ..Default::default() }); diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index 4318d663a2e17fe80199830d443f5b6a85fae441..e689e520c92ffcb230a83f7a728722a688729417 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -87,7 +87,7 @@ impl Chain for TestBridgedChain { impl ChainWithGrandpa for TestBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = MAX_BRIDGED_AUTHORITIES; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 173d6f1c16448517b7051cfba2f96625ff3d525a..df5b92db7402bd048b1afca9c13cfb1dddc74863 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -5,15 +5,16 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true 
[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/modules/messages/src/inbound_lane.rs b/bridges/modules/messages/src/inbound_lane.rs index 966ec939e70e22e830ee30157d2d7da74d59733c..da1698e6e0370f9f84ca8dd53bc1ebc99f696017 100644 --- a/bridges/modules/messages/src/inbound_lane.rs +++ b/bridges/modules/messages/src/inbound_lane.rs @@ -21,7 +21,7 @@ use crate::Config; use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, - ReceivalResult, UnrewardedRelayer, + ReceptionResult, UnrewardedRelayer, }; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use frame_support::traits::Get; @@ -170,21 +170,21 @@ impl InboundLane { relayer_at_bridged_chain: &S::Relayer, nonce: MessageNonce, message_data: DispatchMessageData, - ) -> ReceivalResult { + ) -> ReceptionResult { let mut data = self.storage.get_or_init_data(); if Some(nonce) != data.last_delivered_nonce().checked_add(1) { - return ReceivalResult::InvalidNonce + return ReceptionResult::InvalidNonce } // if there are more unrewarded relayer entries than we may accept, reject this message if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { - return ReceivalResult::TooManyUnrewardedRelayers + return ReceptionResult::TooManyUnrewardedRelayers } // if there are more unconfirmed messages than we may accept, reject this message let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { - return ReceivalResult::TooManyUnconfirmedMessages + return ReceptionResult::TooManyUnconfirmedMessages } // then, dispatch message @@ -207,7 +207,7 @@ impl InboundLane { }; self.storage.set_data(data); - ReceivalResult::Dispatched(dispatch_result) + ReceptionResult::Dispatched(dispatch_result) } } @@ -235,7 +235,7 @@ mod tests { nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } @@ -362,7 +362,7 @@ mod tests { 10, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::InvalidNonce + ReceptionResult::InvalidNonce ); assert_eq!(lane.storage.get_or_init_data().last_delivered_nonce(), 0); }); @@ -381,7 +381,7 @@ mod tests { current_nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } // Fails to dispatch new message from different than latest relayer. @@ -391,7 +391,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnrewardedRelayers, + ReceptionResult::TooManyUnrewardedRelayers, ); // Fails to dispatch new messages from latest relayer. Prevents griefing attacks. 
assert_eq!( @@ -400,7 +400,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnrewardedRelayers, + ReceptionResult::TooManyUnrewardedRelayers, ); }); } @@ -417,7 +417,7 @@ mod tests { current_nonce, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); } // Fails to dispatch new message from different than latest relayer. @@ -427,7 +427,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnconfirmedMessages, + ReceptionResult::TooManyUnconfirmedMessages, ); // Fails to dispatch new messages from latest relayer. assert_eq!( @@ -436,7 +436,7 @@ mod tests { max_nonce + 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::TooManyUnconfirmedMessages, + ReceptionResult::TooManyUnconfirmedMessages, ); }); } @@ -451,7 +451,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -459,7 +459,7 @@ mod tests { 2, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -467,7 +467,7 @@ mod tests { 3, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.storage.get_or_init_data().relayers, @@ -490,7 +490,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::Dispatched(dispatch_result(0)) + ReceptionResult::Dispatched(dispatch_result(0)) ); assert_eq!( lane.receive_message::( @@ -498,7 +498,7 @@ mod tests { 1, inbound_message_data(REGULAR_PAYLOAD) ), - ReceivalResult::InvalidNonce, + ReceptionResult::InvalidNonce, ); }); } @@ -524,7 +524,7 @@ mod tests { 1, inbound_message_data(payload) ), - ReceivalResult::Dispatched(dispatch_result(1)) + ReceptionResult::Dispatched(dispatch_result(1)) ); }); } diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs index a86cb326cf0404512b7fe6ad0aa2a696ff7d0a47..bc00db9eba5ba12dbdaa0de7008f293a727a7ef5 100644 --- a/bridges/modules/messages/src/lib.rs +++ b/bridges/modules/messages/src/lib.rs @@ -47,7 +47,7 @@ pub use weights_ext::{ use crate::{ inbound_lane::{InboundLane, InboundLaneStorage}, - outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationError}, + outbound_lane::{OutboundLane, OutboundLaneStorage, ReceptionConfirmationError}, }; use bp_messages::{ @@ -90,7 +90,7 @@ pub const LOG_TARGET: &str = "runtime::bridge-messages"; #[frame_support::pallet] pub mod pallet { use super::*; - use bp_messages::{ReceivalResult, ReceivedMessages}; + use bp_messages::{ReceivedMessages, ReceptionResult}; use bp_runtime::RangeInclusiveExt; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -376,13 +376,13 @@ pub mod pallet { // delivery transaction cost anyway. And base cost covers everything except // dispatch, so we have a balance here. 
let unspent_weight = match &receival_result { - ReceivalResult::Dispatched(dispatch_result) => { + ReceptionResult::Dispatched(dispatch_result) => { valid_messages += 1; dispatch_result.unspent_weight }, - ReceivalResult::InvalidNonce | - ReceivalResult::TooManyUnrewardedRelayers | - ReceivalResult::TooManyUnconfirmedMessages => message_dispatch_weight, + ReceptionResult::InvalidNonce | + ReceptionResult::TooManyUnrewardedRelayers | + ReceptionResult::TooManyUnconfirmedMessages => message_dispatch_weight, }; lane_messages_received_status.push(message.key.nonce, receival_result); @@ -455,7 +455,7 @@ pub mod pallet { last_delivered_nonce, &lane_data.relayers, ) - .map_err(Error::::ReceivalConfirmation)?; + .map_err(Error::::ReceptionConfirmation)?; if let Some(confirmed_messages) = confirmed_messages { // emit 'delivered' event @@ -563,7 +563,7 @@ pub mod pallet { /// The message someone is trying to work with (i.e. increase fee) is not yet sent. MessageIsNotYetSent, /// Error confirming messages receival. - ReceivalConfirmation(ReceivalConfirmationError), + ReceptionConfirmation(ReceptionConfirmationError), /// Error generated by the `OwnedBridgeModule` trait. BridgeModule(bp_runtime::OwnedBridgeModuleError), } @@ -923,7 +923,7 @@ mod tests { PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_LANE_ID_2, TEST_LANE_ID_3, TEST_RELAYER_A, TEST_RELAYER_B, }, - outbound_lane::ReceivalConfirmationError, + outbound_lane::ReceptionConfirmationError, }; use bp_messages::{ source_chain::MessagesBridge, BridgeMessagesCall, UnrewardedRelayer, @@ -950,11 +950,11 @@ mod tests { let outbound_lane = outbound_lane::(lane_id); let message_nonce = outbound_lane.data().latest_generated_nonce + 1; - let prev_enqueud_messages = outbound_lane.data().queued_messages().saturating_len(); + let prev_enqueued_messages = outbound_lane.data().queued_messages().saturating_len(); let valid_message = Pallet::::validate_message(lane_id, ®ULAR_PAYLOAD) .expect("validate_message has failed"); let artifacts = Pallet::::send_message(valid_message); - assert_eq!(artifacts.enqueued_messages, prev_enqueud_messages + 1); + assert_eq!(artifacts.enqueued_messages, prev_enqueued_messages + 1); // check event with assigned nonce assert_eq!( @@ -1541,7 +1541,7 @@ mod tests { } #[test] - fn actual_dispatch_weight_does_not_overlow() { + fn actual_dispatch_weight_does_not_overflow() { run_test(|| { let message1 = message(1, message_payload(0, u64::MAX / 2)); let message2 = message(2, message_payload(0, u64::MAX / 2)); @@ -1775,7 +1775,7 @@ mod tests { // returns `last_confirmed_nonce`; // 3) it means that we're going to confirm delivery of messages 1..=1; // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and - // numer of actually confirmed messages is `1`. + // number of actually confirmed messages is `1`. 
assert_noop!( Pallet::::receive_messages_delivery_proof( RuntimeOrigin::signed(1), @@ -1785,8 +1785,8 @@ mod tests { ))), UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }, ), - Error::::ReceivalConfirmation( - ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected + Error::::ReceptionConfirmation( + ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected ), ); }); diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs index 431c2cfb7eef3e8dd48e49c6ac37153ae64d57b6..acef5546d2a64fa8a3fb38c6b41ae30819cdeaa2 100644 --- a/bridges/modules/messages/src/outbound_lane.rs +++ b/bridges/modules/messages/src/outbound_lane.rs @@ -53,7 +53,7 @@ pub type StoredMessagePayload = BoundedVec>::MaximalOu /// Result of messages receival confirmation. #[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] -pub enum ReceivalConfirmationError { +pub enum ReceptionConfirmationError { /// Bridged chain is trying to confirm more messages than we have generated. May be a result /// of invalid bridged chain storage. FailedToConfirmFutureMessages, @@ -103,7 +103,7 @@ impl OutboundLane { max_allowed_messages: MessageNonce, latest_delivered_nonce: MessageNonce, relayers: &VecDeque>, - ) -> Result, ReceivalConfirmationError> { + ) -> Result, ReceptionConfirmationError> { let mut data = self.storage.data(); let confirmed_messages = DeliveredMessages { begin: data.latest_received_nonce.saturating_add(1), @@ -113,7 +113,7 @@ impl OutboundLane { return Ok(None) } if confirmed_messages.end > data.latest_generated_nonce { - return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages) + return Err(ReceptionConfirmationError::FailedToConfirmFutureMessages) } if confirmed_messages.total_messages() > max_allowed_messages { // that the relayer has declared correct number of messages that the proof contains (it @@ -127,7 +127,7 @@ impl OutboundLane { confirmed_messages.total_messages(), max_allowed_messages, ); - return Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected) + return Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected) } ensure_unrewarded_relayers_are_correct(confirmed_messages.end, relayers)?; @@ -176,24 +176,24 @@ impl OutboundLane { fn ensure_unrewarded_relayers_are_correct( latest_received_nonce: MessageNonce, relayers: &VecDeque>, -) -> Result<(), ReceivalConfirmationError> { +) -> Result<(), ReceptionConfirmationError> { let mut expected_entry_begin = relayers.front().map(|entry| entry.messages.begin); for entry in relayers { // unrewarded relayer entry must have at least 1 unconfirmed message // (guaranteed by the `InboundLane::receive_message()`) if entry.messages.end < entry.messages.begin { - return Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry) + return Err(ReceptionConfirmationError::EmptyUnrewardedRelayerEntry) } // every entry must confirm range of messages that follows previous entry range // (guaranteed by the `InboundLane::receive_message()`) if expected_entry_begin != Some(entry.messages.begin) { - return Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries) + return Err(ReceptionConfirmationError::NonConsecutiveUnrewardedRelayerEntries) } expected_entry_begin = entry.messages.end.checked_add(1); // entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()` // (guaranteed by the `InboundLane::receive_message()`) if entry.messages.end > latest_received_nonce { - return 
Err(ReceivalConfirmationError::FailedToConfirmFutureMessages) + return Err(ReceptionConfirmationError::FailedToConfirmFutureMessages) } } @@ -228,7 +228,7 @@ mod tests { fn assert_3_messages_confirmation_fails( latest_received_nonce: MessageNonce, relayers: &VecDeque>, - ) -> Result, ReceivalConfirmationError> { + ) -> Result, ReceptionConfirmationError> { run_test(|| { let mut lane = outbound_lane::(TEST_LANE_ID); lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); @@ -299,7 +299,7 @@ mod tests { fn confirm_delivery_rejects_nonce_larger_than_last_generated() { assert_eq!( assert_3_messages_confirmation_fails(10, &unrewarded_relayers(1..=10),), - Err(ReceivalConfirmationError::FailedToConfirmFutureMessages), + Err(ReceptionConfirmationError::FailedToConfirmFutureMessages), ); } @@ -314,7 +314,7 @@ mod tests { .chain(unrewarded_relayers(3..=3).into_iter()) .collect(), ), - Err(ReceivalConfirmationError::FailedToConfirmFutureMessages), + Err(ReceptionConfirmationError::FailedToConfirmFutureMessages), ); } @@ -330,7 +330,7 @@ mod tests { .chain(unrewarded_relayers(2..=3).into_iter()) .collect(), ), - Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry), + Err(ReceptionConfirmationError::EmptyUnrewardedRelayerEntry), ); } @@ -345,7 +345,7 @@ mod tests { .chain(unrewarded_relayers(2..=2).into_iter()) .collect(), ), - Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries), + Err(ReceptionConfirmationError::NonConsecutiveUnrewardedRelayerEntries), ); } @@ -409,11 +409,11 @@ mod tests { lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); assert_eq!( lane.confirm_delivery(0, 3, &unrewarded_relayers(1..=3)), - Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected), + Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected), ); assert_eq!( lane.confirm_delivery(2, 3, &unrewarded_relayers(1..=3)), - Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected), + Err(ReceptionConfirmationError::TryingToConfirmMoreMessagesThanExpected), ); assert_eq!( lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index e454a6f2888fa169a0b0795101172b2f260b4020..35213be0674a8c8d31de79afb720fbf457f3445a 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -5,14 +5,15 @@ description = "Module that allows bridged relay chains to exchange information o authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index 3af3fd3e76398eaefdb7a9380344371ec44d27d7..d9cbabf850ec99ee13baa0f8bfc013b1192bd000 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -261,7 +261,7 @@ impl Chain for TestBridgedChain { impl ChainWithGrandpa for TestBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; - const 
REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } @@ -294,7 +294,7 @@ impl Chain for OtherBridgedChain { impl ChainWithGrandpa for OtherBridgedChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 8; const MAX_MANDATORY_HEADER_SIZE: u32 = 256; const AVERAGE_HEADER_SIZE: u32 = 64; } diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index b78da5cbeeca65a4f448cbc38928894d51e8f7b4..e2b7aca92249c19096bf129be6fab1be08a5357a 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 20f8ff4407b2ad9882c64b334fa557a6c7dc4ef2..06f2a339bed9d07b5615ca047177228d6585cfc2 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -5,14 +5,15 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } # Bridge dependencies diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs index f219be78f9e1b5469fb752eed3f662c954d0ec42..5d0be41b1b5588e3ddc8c6306c9bf83ec29d6056 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs @@ -427,7 +427,7 @@ mod tests { run_test(|| { Bridge::::put(uncongested_bridge(FixedU128::from_rational(125, 100))); - // it shold eventually decreased to one + // it should eventually decreased to one while XcmBridgeHubRouter::bridge().delivery_fee_factor > MINIMAL_DELIVERY_FEE_FACTOR { XcmBridgeHubRouter::on_initialize(One::one()); } diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index e10119e864953f1777c43151092ae43a5e594b8c..4483a3790900f975030e8c820f9b42442a747262 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -1,18 +1,19 @@ [package] name = "pallet-xcm-bridge-hub" -description = "Module that adds dynamic bridges/lanes support to XCM infrastucture at the 
bridge hub." +description = "Module that adds dynamic bridges/lanes support to XCM infrastructure at the bridge hub." version = "0.2.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies bp-messages = { path = "../../primitives/messages", default-features = false } diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index 205b593365ef8216a2e501e5751303185d4f7537..f7a61a9ff32bd42f4199859834b6296aeaa18f4a 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/header-chain/src/justification/mod.rs b/bridges/primitives/header-chain/src/justification/mod.rs index b32d8bdb5f1d8ce05722c938a083d7f582139835..d7c2cbf429e2b01efe4a9ea2481e66e2857d0044 100644 --- a/bridges/primitives/header-chain/src/justification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/mod.rs @@ -83,7 +83,7 @@ impl GrandpaJustification { .saturating_add(HashOf::::max_encoded_len().saturated_into()); let max_expected_votes_ancestries_size = - C::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY.saturating_mul(C::AVERAGE_HEADER_SIZE); + C::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY.saturating_mul(C::AVERAGE_HEADER_SIZE); // justification is round number (u64=8b), a signed GRANDPA commit and the // `votes_ancestries` vector diff --git a/bridges/primitives/header-chain/src/justification/verification/mod.rs b/bridges/primitives/header-chain/src/justification/verification/mod.rs index c71149bf9c28e350fb43429623ca47cd367b9091..9df3511e1035ef769e5ef6c373253161be041efb 100644 --- a/bridges/primitives/header-chain/src/justification/verification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/verification/mod.rs @@ -318,7 +318,7 @@ trait JustificationVerifier { } // check that the cumulative weight of validators that voted for the justification target - // (or one of its descendents) is larger than the required threshold. + // (or one of its descendants) is larger than the required threshold. 
if cumulative_weight < threshold { return Err(Error::TooLowCumulativeWeight) } diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index 84a6a881a835b8afc3b5cde8992df1733859d29a..98fb9ff83d8335fc04fbce7f9e566c73d15752a8 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -283,7 +283,7 @@ pub trait ChainWithGrandpa: Chain { /// ancestry and the pallet will accept such justification. The limit is only used to compute /// maximal refund amount and submitting justifications which exceed the limit, may be costly /// to submitter. - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32; /// Maximal size of the mandatory chain header. Mandatory header is the header that enacts new /// GRANDPA authorities set (so it has large digest inside). @@ -317,8 +317,8 @@ where const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ::WITH_CHAIN_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = ::MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - ::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + ::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = ::MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = ::AVERAGE_HEADER_SIZE; @@ -373,7 +373,7 @@ mod tests { impl ChainWithGrandpa for TestChain { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "Test"; const MAX_AUTHORITIES_COUNT: u32 = 128; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 2; const MAX_MANDATORY_HEADER_SIZE: u32 = 100_000; const AVERAGE_HEADER_SIZE: u32 = 1_024; } diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 8aa6b4b05e5efb2427a8548e91ec5f47ab494968..d41acfb9d32863d14e56e095755791a420fd3ce6 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -5,13 +5,14 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/messages/src/lib.rs b/bridges/primitives/messages/src/lib.rs index 51b3f25f7151867b52e8e5f49bc70b0a3632c05e..c3f79b3ee388c4584def56056f6cdf6328032e18 100644 --- a/bridges/primitives/messages/src/lib.rs +++ b/bridges/primitives/messages/src/lib.rs @@ -289,27 +289,27 @@ pub struct ReceivedMessages { /// Id of the lane which is receiving messages. pub lane: LaneId, /// Result of messages which we tried to dispatch - pub receive_results: Vec<(MessageNonce, ReceivalResult)>, + pub receive_results: Vec<(MessageNonce, ReceptionResult)>, } impl ReceivedMessages { /// Creates new `ReceivedMessages` structure from given results. 
pub fn new( lane: LaneId, - receive_results: Vec<(MessageNonce, ReceivalResult)>, + receive_results: Vec<(MessageNonce, ReceptionResult)>, ) -> Self { ReceivedMessages { lane, receive_results } } /// Push `result` of the `message` delivery onto the `receive_results` vector. - pub fn push(&mut self, message: MessageNonce, result: ReceivalResult) { + pub fn push(&mut self, message: MessageNonce, result: ReceptionResult) { self.receive_results.push((message, result)); } } /// Result of a single message reception. #[derive(RuntimeDebug, Encode, Decode, PartialEq, Eq, Clone, TypeInfo)] -pub enum ReceivalResult { +pub enum ReceptionResult { /// Message has been received and dispatched. Note that we don't care whether dispatch has /// been successful or not - in both cases the message falls into this category. ///
diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index 575f26193eb68643c5c0a5fe6376d8b735ee2840..2e7000b86a5e4ba21ccadf74b1c2d5374db5c545 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge dependencies
diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index c0dae684b5f2f3b7b9be096a808fc67d15dadfcf..53b1e574cb1997e556f17b7f21f6a28d9eb84400 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -5,14 +5,15 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Bridge Dependencies
diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index df2836495bbe131e9cf810c43eb4af5eefaf43b7..e83be59b23890036905ae1abb441c454c2ce29a5 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -71,7 +71,7 @@ pub const MAX_AUTHORITIES_COUNT: u32 = 1_256; /// justifications with any additional headers in votes ancestry, so reasonable headers may /// be set to zero. But we assume that there may be small GRANDPA lags, so we're leaving some /// reserve here.
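/// (Roughly speaking: with the value below, a justification that drags in up to two
/// ancestor headers of the target header still fits within the refund computation,
/// while anything larger may be at the submitter's own cost.)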
-pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; +pub const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = 2; /// Average header size in `votes_ancestries` field of justification on Polkadot-like /// chains. diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 3bd6809d2789e0b3aced5b8d96448b63e1074ee4..1be7f1dc6ebd38061e98865b45d4f85d8f3f7448 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -5,13 +5,14 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Bridge Dependencies diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 22206fb2c376ce53fee9dc8ff806baaef3ce7c28..cca9c21a608d7c8892a01864a7153af7e63ea520 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -5,17 +5,18 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } hash-db = { version = "0.16.0", default-features = false } impl-trait-for-tuples = "0.2.2" log = { workspace = true } num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } # Substrate Dependencies diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 9ba21a1cddf13896b21494045cea7fdd92259ce8..4ec5a001a99ecad21617ed0afc57d3edac383d0d 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -104,7 +104,7 @@ pub trait Chain: Send + Sync + 'static { const ID: ChainId; /// A type that fulfills the abstract idea of what a Substrate block number is. - // Constraits come from the associated Number type of `sp_runtime::traits::Header` + // Constraints come from the associated Number type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Number // @@ -125,7 +125,7 @@ pub trait Chain: Send + Sync + 'static { + MaxEncodedLen; /// A type that fulfills the abstract idea of what a Substrate hash is. 
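/// (On Polkadot-like chains this is typically `sp_core::H256`, produced by the
/// `Hasher` type defined further below.)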
- // Constraits come from the associated Hash type of `sp_runtime::traits::Header` + // Constraints come from the associated Hash type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hash type Hash: Parameter @@ -143,7 +143,7 @@ pub trait Chain: Send + Sync + 'static { /// A type that fulfills the abstract idea of what a Substrate hasher (a type /// that produces hashes) is. - // Constraits come from the associated Hashing type of `sp_runtime::traits::Header` + // Constraints come from the associated Hashing type of `sp_runtime::traits::Header` // See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hashing type Hasher: HashT;
diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 850318923dc7671c26cc3edcf2f9d59bd7b987b9..c9c5c9412913b0470024e9e1473e5d69ff184f25 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -56,7 +56,7 @@ mod chain; mod storage_proof; mod storage_types; -// Re-export macro to aviod include paste dependency everywhere +// Re-export macro to avoid having to include the `paste` dependency everywhere pub use sp_runtime::paste; /// Use this when something must be shared among all instances. @@ -461,7 +461,7 @@ macro_rules! generate_static_str_provider { }; } -/// Error message that is only dispayable in `std` environment. +/// Error message that is only displayable in `std` environment. #[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct StrippableError {
diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index d379e950b86ef6f98754c651c980291c9b9c4012..d314c38683cdbc8b40cfda3a14c64f91854e5d7f 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -5,6 +5,7 @@ description = "Utilities for testing substrate-based runtime bridge code" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true @@ -14,7 +15,7 @@ bp-header-chain = { path = "../header-chain", default-features = false } bp-parachains = { path = "../parachains", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } ed25519-dalek = { version = "2.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false }
diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs index 1d80890779bf8310b393d585749e96f9577196a1..f4fe4a242e79c0e1c8a499c4dd18ed4a2164c656 100644 --- a/bridges/primitives/test-utils/src/lib.rs +++ b/bridges/primitives/test-utils/src/lib.rs @@ -88,7 +88,7 @@ pub fn make_default_justification(header: &H) -> GrandpaJustificatio /// Generate justifications in a way where we are able to tune the number of pre-commits /// and vote ancestries which are included in the justification.
/// -/// This is useful for benchmarkings where we want to generate valid justifications with +/// This is useful for benchmarks where we want to generate valid justifications with /// a specific number of pre-commits (tuned with the number of "authorities") and/or a specific /// number of vote ancestries (tuned with the "votes" parameter). /// diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index 9297a8603c0aa407e3dc5b860e21a0c227cf1bcc..94eece16d5797eb23dd12af5b8b5aeb7d283d862 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -5,13 +5,14 @@ version = "0.6.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/bridges/primitives/xcm-bridge-hub/Cargo.toml b/bridges/primitives/xcm-bridge-hub/Cargo.toml index ad49ec1e83152f60b8386d97d522c798ee87e618..27881bc99d1f838bb5a72c02fe565ef5dc0307fd 100644 --- a/bridges/primitives/xcm-bridge-hub/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub/Cargo.toml @@ -5,6 +5,7 @@ version = "0.2.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true diff --git a/bridges/scripts/verify-pallets-build.sh b/bridges/scripts/verify-pallets-build.sh index b96bbf1833b6b3ce2bb34d2dc34aa5b8f54eb528..9c57a2a3c476006a636b4944648dfdf196de3591 100755 --- a/bridges/scripts/verify-pallets-build.sh +++ b/bridges/scripts/verify-pallets-build.sh @@ -68,6 +68,7 @@ rm -rf $BRIDGES_FOLDER/modules/beefy rm -rf $BRIDGES_FOLDER/modules/shift-session-manager rm -rf $BRIDGES_FOLDER/primitives/beefy rm -rf $BRIDGES_FOLDER/relays +rm -rf $BRIDGES_FOLDER/relay-clients rm -rf $BRIDGES_FOLDER/scripts/add_license.sh rm -rf $BRIDGES_FOLDER/scripts/build-containers.sh rm -rf $BRIDGES_FOLDER/scripts/ci-cache.sh @@ -77,6 +78,7 @@ rm -rf $BRIDGES_FOLDER/scripts/regenerate_runtimes.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights-setup.sh rm -rf $BRIDGES_FOLDER/scripts/update_substrate.sh +rm -rf $BRIDGES_FOLDER/substrate-relay rm -rf $BRIDGES_FOLDER/tools rm -f $BRIDGES_FOLDER/.dockerignore rm -f $BRIDGES_FOLDER/local.Dockerfile.dockerignore @@ -89,6 +91,7 @@ rm -f $BRIDGES_FOLDER/local.Dockerfile rm -f $BRIDGES_FOLDER/CODEOWNERS rm -f $BRIDGES_FOLDER/Dockerfile rm -f $BRIDGES_FOLDER/rustfmt.toml +rm -f $BRIDGES_FOLDER/RELEASE.md # let's fix Cargo.toml a bit (it'll be helpful if we are in the bridges repo) if [[ ! -f "Cargo.toml" ]]; then @@ -131,7 +134,7 @@ cargo check -p bridge-runtime-common cargo check -p bridge-runtime-common --features runtime-benchmarks cargo check -p bridge-runtime-common --features integrity-test -# we're removing lock file after all chechs are done. 
Otherwise we may use different +# we're removing lock file after all checks are done. Otherwise we may use different # Substrate/Polkadot/Cumulus commits and our checks will fail rm -f $BRIDGES_FOLDER/Cargo.lock diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index c8999633c97abb00174e38e16ed5618e7baf0b59..cadd542432e775e273f5024928d2162f645a77e2 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -36,7 +36,7 @@ sp-io = { path = "../../../../substrate/primitives/io", default-features = false snowbridge-core = { path = "../../primitives/core", default-features = false } snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } -snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures", default-features = false, optional = true } +snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures", default-features = false, optional = true } primitives = { package = "snowbridge-beacon-primitives", path = "../../primitives/beacon", default-features = false } static_assertions = { version = "1.1.0", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } @@ -48,7 +48,7 @@ sp-keyring = { path = "../../../../substrate/primitives/keyring" } serde_json = { workspace = true, default-features = true } hex-literal = "0.4.1" pallet-timestamp = { path = "../../../../substrate/frame/timestamp" } -snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures" } +snowbridge-pallet-ethereum-client-fixtures = { path = "fixtures" } sp-io = { path = "../../../../substrate/primitives/io" } serde = { workspace = true, default-features = true } diff --git a/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs index facaffb8149cd88b0f3f369ed79dd922e9b2c983..37fe45ba60b0be1311da2d6090283acb6710383e 100644 --- a/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/fixtures/src/lib.rs @@ -6,9 +6,10 @@ use hex_literal::hex; use snowbridge_beacon_primitives::{ - types::deneb, updates::AncestryProof, BeaconHeader, ExecutionHeaderUpdate, - NextSyncCommitteeUpdate, SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader, + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, NextSyncCommitteeUpdate, + SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader, }; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; use sp_core::U256; use sp_std::{boxed::Box, vec}; @@ -20,11 +21,11 @@ type Update = snowbridge_beacon_primitives::Update; pub fn make_checkpoint() -> Box { Box::new(CheckpointUpdate { header: BeaconHeader { - slot: 2496, - proposer_index: 2, - parent_root: hex!("c99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622").into(), - state_root: hex!("fbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde").into(), - body_root: hex!("a2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0").into(), + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), }, current_sync_committee: SyncCommittee { pubkeys: [ @@ -544,20 
+545,20 @@ pub fn make_checkpoint() -> Box { aggregate_pubkey: hex!("8fbd66eeec2ff69ef0b836f04b1d67d88bcd4dfd495061964ad757c77abe822a39fa1cd8ed0d4d9bc9276cea73fd745c").into(), }, current_sync_committee_branch: vec![ - hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), - hex!("93880225bf99a0c5ec22b266ff829837754e9c5edf37a68c05b8f803fd82fa45").into(), - hex!("4c60656ec9a95fcf11030ad309c716b5b15beb7f60a0bcfc7c9d4eff505472ff").into(), - hex!("22d1645fceb4bf9a695043dda19a53e784ec70df6a6b1bd66ea30eba1cca5f2f").into(), - hex!("a8fc6cad84ceefc633ec56c2d031d525e1cb4b51c70eb252919fce5bba9a1fde").into(), + hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), + hex!("a9e90f89e7f90fd5d79a6bbcaf40ba5cfc05ab1b561ac51c84867c32248d5b1e").into(), + hex!("bd1a76b03e02402bb24a627de1980a80ab17691980271f597b844b89b497ef75").into(), + hex!("07bbcd27c7cad089023db046eda17e8209842b7d97add8b873519e84fe6480e7").into(), + hex!("94c11eeee4cb6192bf40810f23486d8c75dfbc2b6f28d988d6f74435ede243b0").into(), ], validators_root: hex!("270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69").into(), - block_roots_root: hex!("d160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe").into(), + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), block_roots_branch: vec![ - hex!("105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135").into(), - hex!("9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99").into(), - hex!("ecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5").into(), - hex!("b2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2").into(), - hex!("cd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf").into(), + hex!("733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f").into(), + hex!("9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa").into(), + hex!("bcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf").into(), + hex!("3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5").into(), + hex!("c2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4").into(), ], }) } @@ -567,13 +568,13 @@ pub fn make_sync_committee_update() -> Box { attested_header: BeaconHeader { slot: 129, proposer_index: 5, - parent_root: hex!("e32b6c18f029e755b0273dc1c4fa2bc4979794c8286ad40276c1b8a8e36049d8").into(), - state_root: hex!("5ec9dacf25a5f09f20be0c59246b3d8dcfe64bd085b4bac5cec180690339801e").into(), - body_root: hex!("4080cf2412d6ff77fc3164ad6155423a7112f207f173145ec16371a93f481f87").into(), + parent_root: hex!("c2def03fe44a2802130ca1a6d8406e4ccf4f344fec7075d4d84431cd4a8b0904").into(), + state_root: hex!("fa62cde6666add7353d7aedcb61ebe3c6c84b5361e34f814825b1250affb5be4").into(), + body_root: hex!("0f9c69f243fe7b5fa5860396c66c720a9e8b1e526e7914188930497cc4a9134c").into(), }, sync_aggregate: SyncAggregate{ sync_committee_bits: hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), - sync_committee_signature: hex!("a761c3333fbb3d36bc8f65454f898da38001499dcd37494cf3d86940a995399ae649216ba4c985af154f83f72c8b1856079b7636a7a8d7d3f7602df2cbf699edb72b65253e82de4d9cc4db7377eafb22f799129f63f094a21c00675bdd5cc243").into(), + sync_committee_signature: 
hex!("810cfde2afea3e276256c09bdf1cd321c33dcadeefddcfd24f488e6f756d917cfda90b5b437b3a4b4ef880985afa28a40cf565ec0a82877ddee36adc01d55d9d4a911ae3e22556e4c2636f1c707366fba019fb49450440fcd263d0b054b04bf0").into(), }, signature_slot: 130, next_sync_committee_update: Some(NextSyncCommitteeUpdate { @@ -1096,34 +1097,34 @@ pub fn make_sync_committee_update() -> Box { }, next_sync_committee_branch: vec![ hex!("3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59").into(), - hex!("fd1e5ff5d4a15081efe3ff17857b1f95984c9a271b1c41c2f81f43e60c2cc541").into(), - hex!("e1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d").into(), - hex!("77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61").into(), - hex!("e97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68").into(), + hex!("43276bee17fc9fba3f4866e902f0e5b5b308d79db91154bb8bf819973837a7d9").into(), + hex!("5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd").into(), + hex!("2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221").into(), + hex!("7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f").into(), ], }), finalized_header: BeaconHeader{ slot: 64, proposer_index: 4, - parent_root: hex!("0f7bc2353778c14c7f6dba0fc5fe6eec87228b0d3a5447b61dce67b4d9338de3").into(), - state_root: hex!("feb990de653ce494c0a263f820eaf05a9300dbdc30cb6065ede602827bfccde4").into(), - body_root: hex!("f5235cd8c24f2695fc5b7989926305c10ad8cf5a87d62a739f675f5543df2ec1").into(), + parent_root: hex!("a876486aaad7ddb897f369fd22d0a9903cd61d00c9e0dfe7998dd68d1008c678").into(), + state_root: hex!("818e21c3388575f8ccc9ff17ec79d5a57915bcd31bccf47770f65a18e068416b").into(), + body_root: hex!("1d1f73b864b3bb7e11ff91b56ca1381e0f9ca8122b2c542db88243604c763019").into(), }, finality_branch: vec![ hex!("0200000000000000000000000000000000000000000000000000000000000000").into(), hex!("10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7").into(), hex!("98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d").into(), - hex!("e1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d").into(), - hex!("77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61").into(), - hex!("e97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68").into(), + hex!("5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd").into(), + hex!("2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221").into(), + hex!("7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f").into(), ], - block_roots_root: hex!("6fcdfd1c3fb1bdd421fe59dddfff3855b5ed5e30373887991a0059d019ad12bc").into(), + block_roots_root: hex!("715b08694bef183a6d94b3113d16a7129f89fc3edec85a7e0eaf6ef9153552ef").into(), block_roots_branch: vec![ - hex!("94b59531f172bc24f914bc0c10104ccb158676850f8cc3b47b6ddb7f096ebdd7").into(), - hex!("22470ed9155a938587d44d5fa19217c0f939d8862e504e67cd8cb4d1b960795e").into(), - hex!("feec3ef1a68f93849e71e84f90b99602cccc31868137b6887ca8244a4b979e8e").into(), + hex!("4028c72c71b6ce80ea7d18b2c9471f4e4fa39746261a9921e832a4a2f9bdf7bb").into(), + hex!("75f98062661785d3290b7bd998b64446582baa49210733fd4603e1a97cd45a44").into(), + hex!("6fb757f44052f30c464810f01b0132adfa1a5446d8715b41e9af88eee1ee3e65").into(), hex!("5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82").into(), - hex!("f5ff4b0c6190005015889879568f5f0d9c40134c7ec4ffdda47950dcd92395ad").into(), + hex!("f2b3cb56753939a728ccad399a434ca490f018f2f331529ec0d8b2d59c509271").into(), ], }) } @@ -1131,95 +1132,180 
@@ pub fn make_sync_committee_update() -> Box { pub fn make_finalized_header_update() -> Box { Box::new(Update { attested_header: BeaconHeader { - slot: 2566, - proposer_index: 6, - parent_root: hex!("6eb9f13a2c496318ce1ab3087bbd872f5c9519a1a7ca8231a2453e3cb523af00").into(), - state_root: hex!("c8cb12766113dff7e46d2917267bf33d0626d99dd47715fcdbc5c65fad3c04b4").into(), - body_root: hex!("d8cfd0d7bc9bc3724417a1655bb0a67c0765ca36197320f4d834150b52ef1420").into(), + slot: 933, + proposer_index: 1, + parent_root: hex!("f5fc63e2780ca302b97aea73fc95d74d702b5afe9a772c2b68f695026337b620").into(), + state_root: hex!("d856d11636bc4d866e78be9e747b222b0977556a367ab42e4085277301438050").into(), + body_root: hex!("5689091ab4eb76c2e876271add4924e1c66ce987c300c24aac2ad8c703e9a33f").into(), }, sync_aggregate: SyncAggregate{ sync_committee_bits: hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), - sync_committee_signature: hex!("9296f9a0387f2cac47008e22ad7c3cd3d49d35384c13e6aa1eacca7dca7c3d2ca81515e50eb3396b9550ed20ef7d8fa2049a186598feb2c00e93728045fcff917733d1827481b8fc95f3913e27fc70112c2490496eb57bb7181f02c3f9fd471f").into(), + sync_committee_signature: hex!("93a3d482fe2a2f7fd2b634169752a8fddf1dc28b23a020b398be8526faf37a74ca0f6db1bed78a9c7256c09a6115235e108e0e8a7ce09287317b0856c4b77dfa5adba6cf4c3ebea5bfa4cd2fcde80fd0a532f2defe65d530201d5d2258796559").into(), }, - signature_slot: 2567, + signature_slot: 934, next_sync_committee_update: None, finalized_header: BeaconHeader { - slot: 2496, - proposer_index: 2, - parent_root: hex!("c99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622").into(), - state_root: hex!("fbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde").into(), - body_root: hex!("a2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0").into(), + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), }, finality_branch: vec![ - hex!("4e00000000000000000000000000000000000000000000000000000000000000").into(), + hex!("1b00000000000000000000000000000000000000000000000000000000000000").into(), hex!("10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7").into(), hex!("98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d").into(), - hex!("958b8e43347f6df6fa5eb3d62d06a862381a6585aa40640dd1c0de11f1cf89c1").into(), - hex!("f107dce04faa86a28fc5d4a618be9cb8d4fc3c23d6c42c3624f3ff4bf6586a03").into(), - hex!("a501cdc02e86969ac3e4d0c5a36f4f049efaa1ab8cb6693f51d130eb52a80f30").into(), + hex!("f12d9aededc72724e417b518fe6f847684f26f81616243dedf8c551cc7d504f5").into(), + hex!("89a85d0907ab3fd6e00ae385f61d456c6191646404ae7b8d23d0e60440cf4d00").into(), + hex!("9fc943b6020eb61d780d78bcc6f6102a81d2c868d58f36e61c6e286a2dc4d8c2").into(), ], - block_roots_root: hex!("d160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe").into(), + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), block_roots_branch: vec![ - hex!("105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135").into(), - hex!("9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99").into(), - hex!("ecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5").into(), - 
hex!("b2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2").into(), - hex!("cd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf").into(), + hex!("733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f").into(), + hex!("9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa").into(), + hex!("bcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf").into(), + hex!("3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5").into(), + hex!("c2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4").into(), ] }) } -pub fn make_execution_header_update() -> Box { - Box::new(ExecutionHeaderUpdate { +pub fn make_execution_proof() -> Box { + Box::new(ExecutionProof { header: BeaconHeader { - slot: 215, - proposer_index: 2, - parent_root: hex!("97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45").into(), - state_root: hex!("b088b5a3a8c90d6dc919a695cd7bb0267c6f983ea2e675c559ceb8f46cb90b67").into(), - body_root: hex!("0ba23c8224fdd01531d5ad51486353bd524a0b4c20bca704e26d3210616f829b").into(), + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), }, ancestry_proof: Some(AncestryProof { header_branch: vec![ - hex!("97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45").into(), - hex!("5ce0db996bd499c2b4f7a93263d5aafd052f420efb617cce6fdd54e25516aa45").into(), - hex!("84f0e373b66011ce774c7061440c0a50a51cce2b4b335395eee3e563d605597f").into(), - hex!("48f9ccc5f9594142c18c3b5c39a99f0549329c6ab3ba06c9a50030eadca87770").into(), - hex!("f89d6e311e05bc75a6f63ce118bccce254551f1a88d54c3b4f773f81f946bd99").into(), - hex!("2edd6d893c22636675147c07dfcdb541a146e87c3f15b51c388be4868246dc9b").into(), - hex!("d76b7de5f856e3208a91a42c9c398a7f4fab35e667bf916346050ae742514a2d").into(), - hex!("83a2e233e76385953ca41de4c3afe60471a61f0cc1b3846b4a0670e3e563b747").into(), - hex!("e783a5a109c2ad74e4eb53e8f6b11b31266a92a9e16c1fd5873109c5d41b282c").into(), - hex!("d4ea1ef3869ee6a0fd0b19d7d70027d144eecd4f1d32cbf47632a0a9069164b9").into(), - hex!("f8179564b58eb93a850d35e4156a04db651106442ad891c3e85155c1762792f1").into(), - hex!("4cbb1edb48cf1e32fb30db60aaaeaf6190ffe4d0c8dbc96cec307daecb78be12").into(), + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), ], - 
finalized_block_root: hex!("890a7f23b9ed2160901654be9efc575d6830ca860e2a97866ae3423fb7bd7231").into(), + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), }), execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { - parent_hash: hex!("d82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a").into(), + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), fee_recipient: hex!("0000000000000000000000000000000000000000").into(), - state_root: hex!("8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3").into(), - receipts_root: hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").into(), - logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010").into(), - prev_randao: hex!("6d9e2a012d82b1b6cb0a2c1c1ed24cc16dbb56e6e39ae545371e0666ab057862").into(), - block_number: 215, - gas_limit: 64842908, - gas_used: 119301, - timestamp: 1705859527, - extra_data: hex!("d983010d0a846765746888676f312e32312e358664617277696e").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), base_fee_per_gas: U256::from(7u64), - block_hash: hex!("48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98").into(), - transactions_root: hex!("5ebc1347fe3df0611d4f66b19bd8e1c6f4eaed0371d850f14c83b1c77ea234e6").into(), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), blob_gas_used: 0, excess_blob_gas: 0, }), execution_branch: vec![ - hex!("f8c69d3830406d668619bcccc13c8dddde41e863326f7418b241d5924c4ad34a").into(), + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), - 
hex!("f4d6b5cf9c6e212615c3674fa625d04eb1114153fb221ef5ad02aa433fc67cfb").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), ], }) } + +pub fn make_inbound_fixture() -> InboundQueueFixture { + InboundQueueFixture { + message: Message { + event_log: Log { + address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), + topics: vec![ + hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), + hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), + hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), + ], + data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), + }, + proof: Proof { + receipt_proof: (vec![ + hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").to_vec(), + hex!("4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f").to_vec(), + ], vec![ + hex!("f851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080").to_vec(), + hex!("f9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), + ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + 
hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), + ], + } + }, + }, + finalized_header: BeaconHeader { + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), + }, + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), + } +} diff --git a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs index 
e1520cd715393e79f0265b07263fa9b762206c97..4b8796b628d793951fec9ba905db1cd17f544eee 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/benchmarking/mod.rs @@ -65,24 +65,6 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn submit_execution_header() -> Result<(), BenchmarkError> { - let caller: T::AccountId = whitelisted_caller(); - let checkpoint_update = make_checkpoint(); - let finalized_header_update = make_finalized_header_update(); - let execution_header_update = make_execution_header_update(); - let execution_header_hash = execution_header_update.execution_header.block_hash(); - EthereumBeaconClient::::process_checkpoint_update(&checkpoint_update)?; - EthereumBeaconClient::::process_update(&finalized_header_update)?; - - #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), Box::new(*execution_header_update)); - - assert!(>::contains_key(execution_header_hash)); - - Ok(()) - } - #[benchmark(extra)] fn bls_fast_aggregate_verify_pre_aggregated() -> Result<(), BenchmarkError> { EthereumBeaconClient::::process_checkpoint_update(&make_checkpoint())?; diff --git a/bridges/snowbridge/pallets/ethereum-client/src/impls.rs b/bridges/snowbridge/pallets/ethereum-client/src/impls.rs index 300431d87707ddcfd15eb7937f8ef581c157aeed..f600b1f67e29e7875bfccd6fab6926edda6498cc 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/impls.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/impls.rs @@ -1,6 +1,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork use super::*; +use frame_support::ensure; +use primitives::ExecutionProof; use snowbridge_core::inbound::{ VerificationError::{self, *}, @@ -14,32 +16,13 @@ impl Verifier for Pallet { /// the log should be in the beacon client storage, meaning it has been verified and is an /// ancestor of a finalized beacon block. fn verify(event_log: &Log, proof: &Proof) -> Result<(), VerificationError> { - log::info!( - target: "ethereum-client", - "💫 Verifying message with block hash {}", - proof.block_hash, - ); + Self::verify_execution_proof(&proof.execution_proof) + .map_err(|e| InvalidExecutionProof(e.into()))?; - let header = >::get(proof.block_hash).ok_or(HeaderNotFound)?; - - let receipt = match Self::verify_receipt_inclusion(header.receipts_root, proof) { - Ok(receipt) => receipt, - Err(err) => { - log::error!( - target: "ethereum-client", - "💫 Verification of receipt inclusion failed for block {}: {:?}", - proof.block_hash, - err - ); - return Err(err) - }, - }; - - log::trace!( - target: "ethereum-client", - "💫 Verified receipt inclusion for transaction at index {} in block {}", - proof.tx_index, proof.block_hash, - ); + let receipt = Self::verify_receipt_inclusion( + proof.execution_proof.execution_header.receipts_root(), + &proof.receipt_proof.1, + )?; event_log.validate().map_err(|_| InvalidLog)?; @@ -53,18 +36,11 @@ impl Verifier for Pallet { if !receipt.contains_log(&event_log) { log::error!( target: "ethereum-client", - "💫 Event log not found in receipt for transaction at index {} in block {}", - proof.tx_index, proof.block_hash, + "💫 Event log not found in receipt for transaction", ); return Err(LogNotFound) } - log::info!( - target: "ethereum-client", - "💫 Receipt verification successful for {}", - proof.block_hash, - ); - Ok(()) } } @@ -74,9 +50,9 @@ impl Pallet { /// `proof.block_hash`. 
pub fn verify_receipt_inclusion( receipts_root: H256, - proof: &Proof, + receipt_proof: &[Vec<u8>], ) -> Result<Receipt, VerificationError> { - let result = verify_receipt_proof(receipts_root, &proof.data.1).ok_or(InvalidProof)?; + let result = verify_receipt_proof(receipts_root, receipt_proof).ok_or(InvalidProof)?; match result { Ok(receipt) => Ok(receipt), @@ -90,4 +66,96 @@ impl Pallet { }, } }
+
+ /// Validates an execution header with an ancestry proof against a finalized checkpoint
+ /// on-chain. The beacon header containing the execution header is sent, plus the execution
+ /// header, along with a proof that the execution header is rooted in the beacon header body.
+ pub(crate) fn verify_execution_proof(execution_proof: &ExecutionProof) -> DispatchResult { + let latest_finalized_state = + FinalizedBeaconState::<T>::get(LatestFinalizedBlockRoot::<T>::get()) + .ok_or(Error::<T>::NotBootstrapped)?; + // Checks that the header is an ancestor of a finalized header, using slot number. + ensure!( + execution_proof.header.slot <= latest_finalized_state.slot, + Error::<T>::HeaderNotFinalized + ); + + // Gets the hash tree root of the execution header, in preparation for the execution + // header proof (used to check that the execution header is rooted in the beacon + // header body). + let execution_header_root: H256 = execution_proof + .execution_header + .hash_tree_root() + .map_err(|_| Error::<T>::BlockBodyHashTreeRootFailed)?; + + ensure!( + verify_merkle_branch( + execution_header_root, + &execution_proof.execution_branch, + config::EXECUTION_HEADER_SUBTREE_INDEX, + config::EXECUTION_HEADER_DEPTH, + execution_proof.header.body_root + ), + Error::<T>::InvalidExecutionHeaderProof + ); + + let beacon_block_root: H256 = execution_proof + .header + .hash_tree_root() + .map_err(|_| Error::<T>::HeaderHashTreeRootFailed)?; + + match &execution_proof.ancestry_proof { + Some(proof) => { + Self::verify_ancestry_proof( + beacon_block_root, + execution_proof.header.slot, + &proof.header_branch, + proof.finalized_block_root, + )?; + }, + None => { + // If the ancestry proof is not provided, we expect this beacon header to be a + // finalized beacon header. We need to check that the header hash matches the + // finalized header root at the expected slot. + let state = <FinalizedBeaconState<T>>::get(beacon_block_root) + .ok_or(Error::<T>::ExpectedFinalizedHeaderNotStored)?; + if execution_proof.header.slot != state.slot { + return Err(Error::<T>::ExpectedFinalizedHeaderNotStored.into()) + } + }, + } + + Ok(()) + }
+
+ /// Verify that `block_root` is an ancestor of `finalized_block_root`. Used to prove that
+ /// an execution header is an ancestor of a finalized header (i.e. the blocks are
+ /// on the same chain).
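+ /// As a worked example, assuming the mainnet constant `SLOTS_PER_HISTORICAL_ROOT = 8192`:
+ /// a block at slot 393 lands at `index_in_array = 393 % 8192 = 393`, so the branch is
+ /// verified against leaf index `8192 + 393 = 8585` of the finalized state's
+ /// `block_roots` tree.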
+ fn verify_ancestry_proof( + block_root: H256, + block_slot: u64, + block_root_proof: &[H256], + finalized_block_root: H256, + ) -> DispatchResult { + let state = <FinalizedBeaconState<T>>::get(finalized_block_root) + .ok_or(Error::<T>::ExpectedFinalizedHeaderNotStored)?; + + ensure!(block_slot < state.slot, Error::<T>::HeaderNotFinalized); + + let index_in_array = block_slot % (SLOTS_PER_HISTORICAL_ROOT as u64); + let leaf_index = (SLOTS_PER_HISTORICAL_ROOT as u64) + index_in_array; + + ensure!( + verify_merkle_branch( + block_root, + block_root_proof, + leaf_index as usize, + config::BLOCK_ROOT_AT_INDEX_DEPTH, + state.block_roots_root + ), + Error::<T>::InvalidAncestryMerkleProof + ); + + Ok(()) + } }
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs index fc2ab2fbb588d0d4e6e4c12cbf3ee365cd4aafc2..c1b9e19729bc47588ad179db0c9c280a05834bc1 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs @@ -10,13 +10,11 @@ //! //! * [`Call::force_checkpoint`]: Set the initial trusted consensus checkpoint. //! * [`Call::set_operating_mode`]: Set the operating mode of the pallet. Can be used to disable -//! processing of conensus updates. +//! processing of consensus updates. //! //! ## Consensus Updates //! //! * [`Call::submit`]: Submit a finalized beacon header with an optional sync committee update -//! * [`Call::submit_execution_header`]: Submit an execution header together with an ancestry proof -//! that can be verified against an already imported finalized beacon header. #![cfg_attr(not(feature = "std"), no_std)] pub mod config; @@ -40,8 +38,7 @@ use frame_support::{ use frame_system::ensure_signed; use primitives::{ fast_aggregate_verify, verify_merkle_branch, verify_receipt_proof, BeaconHeader, BlsError, - CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, ForkData, ForkVersion, - ForkVersions, PublicKeyPrepared, SigningData, + CompactBeaconState, ForkData, ForkVersion, ForkVersions, PublicKeyPrepared, SigningData, }; use snowbridge_core::{BasicOperatingMode, RingBufferMap}; use sp_core::H256; @@ -51,11 +48,7 @@ pub use weights::WeightInfo; use functions::{ compute_epoch, compute_period, decompress_sync_committee_bits, sync_committee_sum, }; -pub use types::ExecutionHeaderBuffer; -use types::{ - CheckpointUpdate, ExecutionHeaderUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, - Update, -}; +use types::{CheckpointUpdate, FinalizedBeaconStateBuffer, SyncCommitteePrepared, Update}; pub use pallet::*; @@ -76,10 +69,7 @@ pub mod pallet { pub struct MaxFinalizedHeadersToKeep<T: Config>(PhantomData<T>); impl<T: Config> Get<u32> for MaxFinalizedHeadersToKeep<T> { fn get() -> u32 { - // Consider max latency allowed between LatestFinalizedState and LatestExecutionState is - // the total slots in one sync_committee_period so 1 should be fine we keep 2 periods - // here for redundancy.
- const MAX_REDUNDANCY: u32 = 2; + const MAX_REDUNDANCY: u32 = 20; config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD as u32 * MAX_REDUNDANCY } } @@ -92,9 +82,6 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; #[pallet::constant] type ForkVersions: Get; - /// Maximum number of execution headers to keep - #[pallet::constant] - type MaxExecutionHeadersToKeep: Get; type WeightInfo: WeightInfo; } @@ -105,10 +92,6 @@ pub mod pallet { block_hash: H256, slot: u64, }, - ExecutionHeaderImported { - block_hash: H256, - block_number: u64, - }, SyncCommitteeUpdated { period: u64, }, @@ -191,25 +174,6 @@ pub mod pallet { pub(super) type NextSyncCommittee = StorageValue<_, SyncCommitteePrepared, ValueQuery>; - /// Latest imported execution header - #[pallet::storage] - #[pallet::getter(fn latest_execution_state)] - pub(super) type LatestExecutionState = - StorageValue<_, ExecutionHeaderState, ValueQuery>; - - /// Execution Headers - #[pallet::storage] - pub type ExecutionHeaders = - StorageMap<_, Identity, H256, CompactExecutionHeader, OptionQuery>; - - /// Execution Headers: Current position in ring buffer - #[pallet::storage] - pub type ExecutionHeaderIndex = StorageValue<_, u32, ValueQuery>; - - /// Execution Headers: Mapping of ring buffer index to a pruning candidate - #[pallet::storage] - pub type ExecutionHeaderMapping = StorageMap<_, Identity, u32, H256, ValueQuery>; - /// The current operating mode of the pallet. #[pallet::storage] #[pallet::getter(fn operating_mode)] @@ -248,21 +212,6 @@ pub mod pallet { Ok(()) } - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::submit_execution_header())] - #[transactional] - /// Submits a new execution header update. The relevant related beacon header - /// is also included to prove the execution header, as well as ancestry proof data. - pub fn submit_execution_header( - origin: OriginFor, - update: Box, - ) -> DispatchResult { - ensure_signed(origin)?; - ensure!(!Self::operating_mode().is_halted(), Error::::Halted); - Self::process_execution_header_update(&update)?; - Ok(()) - } - /// Halt or resume all pallet operations. May only be called by root. #[pallet::call_index(3)] #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] @@ -325,41 +274,19 @@ pub mod pallet { >::set(sync_committee_prepared); >::kill(); InitialCheckpointRoot::::set(header_root); - >::kill(); Self::store_validators_root(update.validators_root); - Self::store_finalized_header(header_root, update.header, update.block_roots_root)?; + Self::store_finalized_header(update.header, update.block_roots_root)?; Ok(()) } pub(crate) fn process_update(update: &Update) -> DispatchResult { - Self::cross_check_execution_state()?; Self::verify_update(update)?; Self::apply_update(update)?; Ok(()) } - /// Cross check to make sure that execution header import does not fall too far behind - /// finalised beacon header import. If that happens just return an error and pause - /// processing until execution header processing has caught up. - pub(crate) fn cross_check_execution_state() -> DispatchResult { - let latest_finalized_state = - FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()) - .ok_or(Error::::NotBootstrapped)?; - let latest_execution_state = Self::latest_execution_state(); - // The execution header import should be at least within the slot range of a sync - // committee period. 
- let max_latency = config::EPOCHS_PER_SYNC_COMMITTEE_PERIOD * config::SLOTS_PER_EPOCH; - ensure!( - latest_execution_state.beacon_slot == 0 || - latest_finalized_state.slot < - latest_execution_state.beacon_slot + max_latency as u64, - Error::::ExecutionHeaderTooFarBehind - ); - Ok(()) - } - /// References and strictly follows /// Verifies that provided next sync committee is valid through a series of checks /// (including checking that a sync committee period isn't skipped and that the header is @@ -534,130 +461,12 @@ pub mod pallet { }; if update.finalized_header.slot > latest_finalized_state.slot { - let finalized_block_root: H256 = update - .finalized_header - .hash_tree_root() - .map_err(|_| Error::::HeaderHashTreeRootFailed)?; - Self::store_finalized_header( - finalized_block_root, - update.finalized_header, - update.block_roots_root, - )?; + Self::store_finalized_header(update.finalized_header, update.block_roots_root)?; } Ok(()) } - /// Validates an execution header for import. The beacon header containing the execution - /// header is sent, plus the execution header, along with a proof that the execution header - /// is rooted in the beacon header body. - pub(crate) fn process_execution_header_update( - update: &ExecutionHeaderUpdate, - ) -> DispatchResult { - let latest_finalized_state = - FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()) - .ok_or(Error::::NotBootstrapped)?; - // Checks that the header is an ancestor of a finalized header, using slot number. - ensure!( - update.header.slot <= latest_finalized_state.slot, - Error::::HeaderNotFinalized - ); - - // Checks that we don't skip execution headers, they need to be imported sequentially. - let latest_execution_state: ExecutionHeaderState = Self::latest_execution_state(); - ensure!( - latest_execution_state.block_number == 0 || - update.execution_header.block_number() == - latest_execution_state.block_number + 1, - Error::::ExecutionHeaderSkippedBlock - ); - - // Gets the hash tree root of the execution header, in preparation for the execution - // header proof (used to check that the execution header is rooted in the beacon - // header body. - let execution_header_root: H256 = update - .execution_header - .hash_tree_root() - .map_err(|_| Error::::BlockBodyHashTreeRootFailed)?; - - ensure!( - verify_merkle_branch( - execution_header_root, - &update.execution_branch, - config::EXECUTION_HEADER_SUBTREE_INDEX, - config::EXECUTION_HEADER_DEPTH, - update.header.body_root - ), - Error::::InvalidExecutionHeaderProof - ); - - let block_root: H256 = update - .header - .hash_tree_root() - .map_err(|_| Error::::HeaderHashTreeRootFailed)?; - - match &update.ancestry_proof { - Some(proof) => { - Self::verify_ancestry_proof( - block_root, - update.header.slot, - &proof.header_branch, - proof.finalized_block_root, - )?; - }, - None => { - // If the ancestry proof is not provided, we expect this header to be a - // finalized header. We need to check that the header hash matches the finalized - // header root at the expected slot. 
- let state = >::get(block_root) - .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; - if update.header.slot != state.slot { - return Err(Error::::ExpectedFinalizedHeaderNotStored.into()) - } - }, - } - - Self::store_execution_header( - update.execution_header.block_hash(), - update.execution_header.clone().into(), - update.header.slot, - block_root, - ); - - Ok(()) - } - - /// Verify that `block_root` is an ancestor of `finalized_block_root`. Used to prove that - /// an execution header is an ancestor of a finalized header (i.e. the blocks are - /// on the same chain). - fn verify_ancestry_proof( - block_root: H256, - block_slot: u64, - block_root_proof: &[H256], - finalized_block_root: H256, - ) -> DispatchResult { - let state = >::get(finalized_block_root) - .ok_or(Error::::ExpectedFinalizedHeaderNotStored)?; - - ensure!(block_slot < state.slot, Error::::HeaderNotFinalized); - - let index_in_array = block_slot % (SLOTS_PER_HISTORICAL_ROOT as u64); - let leaf_index = (SLOTS_PER_HISTORICAL_ROOT as u64) + index_in_array; - - ensure!( - verify_merkle_branch( - block_root, - block_root_proof, - leaf_index as usize, - config::BLOCK_ROOT_AT_INDEX_DEPTH, - state.block_roots_root - ), - Error::::InvalidAncestryMerkleProof - ); - - Ok(()) - } - - /// Computes the signing root for a given beacon header and domain. The hash tree root /// of the beacon header is computed, and then the combination of the beacon header hash /// and the domain makes up the signing root. @@ -679,13 +488,15 @@ pub mod pallet { /// Stores a compacted (slot and block roots root (hash of the `block_roots` beacon state /// field, used for ancestry proof)) beacon state in a ring buffer map, with the header root /// as map key. - fn store_finalized_header( - header_root: H256, + pub fn store_finalized_header( header: BeaconHeader, block_roots_root: H256, ) -> DispatchResult { let slot = header.slot; + let header_root: H256 = + header.hash_tree_root().map_err(|_| Error::::HeaderHashTreeRootFailed)?; + >::insert( header_root, CompactBeaconState { slot: header.slot, block_roots_root }, @@ -704,36 +515,6 @@ pub mod pallet { Ok(()) } - /// Stores the provided execution header in pallet storage. The header is stored - /// in a ring buffer map, with the block hash as map key. The last imported execution - /// header is also kept in storage, for the relayer to check import progress. - pub fn store_execution_header( - block_hash: H256, - header: CompactExecutionHeader, - beacon_slot: u64, - beacon_block_root: H256, - ) { - let block_number = header.block_number; - - >::insert(block_hash, header); - - log::trace!( - target: LOG_TARGET, - "💫 Updated latest execution block at {} to number {}.", - block_hash, - block_number - ); - - LatestExecutionState::::mutate(|s| { - s.beacon_block_root = beacon_block_root; - s.beacon_slot = beacon_slot; - s.block_hash = block_hash; - s.block_number = block_number; - }); - - Self::deposit_event(Event::ExecutionHeaderImported { block_hash, block_number }); - } - - /// Stores the validators root in storage. Validators root is the hash tree root of all the /// validators at genesis and is used to identify the chain that we are on /// (used in conjunction with the fork version).
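One step of the removed `verify_ancestry_proof` above deserves spelling out: `block_roots` in the beacon state is a ring of `SLOTS_PER_HISTORICAL_ROOT` roots, so a slot's root lives at `slot % SLOTS_PER_HISTORICAL_ROOT`, and adding `SLOTS_PER_HISTORICAL_ROOT` converts that ring position into the Merkle leaf index under `block_roots_root` that `verify_merkle_branch` expects. A standalone illustration, assuming the standard value `SLOTS_PER_HISTORICAL_ROOT = 8192` (restated locally, not imported from the crate):

```rust
// Standalone restatement of the removed leaf-index computation. The constant
// is an assumed standard preset, not read from the pallet's config.
const SLOTS_PER_HISTORICAL_ROOT: u64 = 8192;

fn leaf_index(block_slot: u64) -> u64 {
    // Position of this slot's root within the block_roots ring buffer...
    let index_in_array = block_slot % SLOTS_PER_HISTORICAL_ROOT;
    // ...offset by the subtree size to address the leaf under block_roots_root.
    SLOTS_PER_HISTORICAL_ROOT + index_in_array
}

fn main() {
    // Slot 393, the beacon slot used by the new execution-proof fixture below:
    assert_eq!(leaf_index(393), 8585);
}
```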
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs index 799b14f4773e421a37b75d226d7829ce8bb7dcf6..bd6144ebd8f9335ff02a82ad756d1b6dd06125e5 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs @@ -2,12 +2,13 @@ // SPDX-FileCopyrightText: 2023 Snowfork use crate as ethereum_beacon_client; use crate::config; -use frame_support::{derive_impl, parameter_types}; -use hex_literal::hex; +use frame_support::{derive_impl, dispatch::DispatchResult, parameter_types}; use pallet_timestamp; -use primitives::{CompactExecutionHeader, Fork, ForkVersions}; +use primitives::{Fork, ForkVersions}; use snowbridge_core::inbound::{Log, Proof}; +use sp_std::default::Default; use std::{fs::File, path::PathBuf}; + type Block = frame_system::mocking::MockBlock; use sp_runtime::BuildStorage; @@ -20,8 +21,8 @@ where serde_json::from_reader(File::open(filepath).unwrap()) } -pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { - load_fixture("execution-header-update.json".to_string()).unwrap() +pub fn load_execution_proof_fixture() -> primitives::ExecutionProof { + load_fixture("execution-proof.json".to_string()).unwrap() } pub fn load_checkpoint_update_fixture( @@ -50,41 +51,8 @@ pub fn load_next_finalized_header_update_fixture( } pub fn get_message_verification_payload() -> (Log, Proof) { - ( - Log { - address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), - topics: vec![ - hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), - hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), - hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), - ], - data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), - }, - Proof { - block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), - tx_index: 0, - data: (vec![ - hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), - hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), - hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), - ], vec![ - hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), - hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), - 
hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), - ]), - } - ) -} - -pub fn get_message_verification_header() -> CompactExecutionHeader { - CompactExecutionHeader { - parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") - .into(), - block_number: 55, - state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3").into(), - receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb") - .into(), - } + let inbound_fixture = snowbridge_pallet_ethereum_client_fixtures::make_inbound_fixture(); + (inbound_fixture.message.event_log, inbound_fixture.message.proof) } frame_support::construct_runtime!( @@ -130,20 +98,25 @@ parameter_types! { epoch: 0, } }; - pub const ExecutionHeadersPruneThreshold: u32 = 8192; } impl ethereum_beacon_client::Config for Test { type RuntimeEvent = RuntimeEvent; type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; type WeightInfo = (); } // Build genesis storage according to the mock runtime. 
pub fn new_tester() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + let ext = sp_io::TestExternalities::new(t); ext } + +pub fn initialize_storage() -> DispatchResult { + let inbound_fixture = snowbridge_pallet_ethereum_client_fixtures::make_inbound_fixture(); + EthereumBeaconClient::store_finalized_header( + inbound_fixture.finalized_header, + inbound_fixture.block_roots_root, + ) +} diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs index 4a7b7b458869de157f4128f0dab0b27cd45452bd..765958c128212ee279bf547eb864335c7921e2eb 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs @@ -1,14 +1,13 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork use crate::{ - functions::compute_period, pallet::ExecutionHeaders, sync_committee_sum, verify_merkle_branch, - BeaconHeader, CompactBeaconState, Error, ExecutionHeaderBuffer, FinalizedBeaconState, - LatestExecutionState, LatestFinalizedBlockRoot, NextSyncCommittee, SyncCommitteePrepared, + functions::compute_period, sync_committee_sum, verify_merkle_branch, BeaconHeader, + CompactBeaconState, Error, FinalizedBeaconState, LatestFinalizedBlockRoot, NextSyncCommittee, + SyncCommitteePrepared, }; use crate::mock::{ - get_message_verification_header, get_message_verification_payload, - load_checkpoint_update_fixture, load_execution_header_update_fixture, + get_message_verification_payload, load_checkpoint_update_fixture, load_finalized_header_update_fixture, load_next_finalized_header_update_fixture, load_next_sync_committee_update_fixture, load_sync_committee_update_fixture, }; @@ -19,14 +18,9 @@ use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SLOTS_PER use frame_support::{assert_err, assert_noop, assert_ok}; use hex_literal::hex; use primitives::{ - CompactExecutionHeader, ExecutionHeaderState, Fork, ForkVersions, NextSyncCommitteeUpdate, - VersionedExecutionPayloadHeader, -}; -use rand::{thread_rng, Rng}; -use snowbridge_core::{ - inbound::{VerificationError, Verifier}, - RingBufferMap, + types::deneb, Fork, ForkVersions, NextSyncCommitteeUpdate, VersionedExecutionPayloadHeader, }; +use snowbridge_core::inbound::{VerificationError, Verifier}; use sp_core::H256; use sp_runtime::DispatchError; @@ -212,61 +206,6 @@ pub fn sync_committee_participation_is_supermajority_errors_when_not_supermajori }); } -#[test] -pub fn execution_header_pruning() { - new_tester().execute_with(|| { - let execution_header_prune_threshold = ExecutionHeadersPruneThreshold::get(); - let to_be_deleted = execution_header_prune_threshold / 2; - - let mut stored_hashes = vec![]; - - for i in 0..execution_header_prune_threshold { - let mut hash = H256::default(); - thread_rng().try_fill(&mut hash.0[..]).unwrap(); - EthereumBeaconClient::store_execution_header( - hash, - CompactExecutionHeader::default(), - i as u64, - hash, - ); - stored_hashes.push(hash); - } - - // We should have stored everything until now - assert_eq!({ ExecutionHeaders::::iter().count() }, stored_hashes.len()); - - // Let's push extra entries so that some of the previous entries are deleted. 
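The `execution_header_pruning` test being deleted here (it continues below) asserted ring-buffer semantics: once the configured maximum of entries exists, each further insert overwrites the oldest ring slot and removes the header that slot pointed at. A minimal, storage-free sketch of that wrap-around pattern; `RingMap` and its fields are hypothetical stand-ins, not the pallet's `RingBufferMap` API:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the ring-buffer map pattern; not the pallet's API.
struct RingMap<K, V> {
    capacity: u32,
    index: u32,             // next ring slot to write
    slots: HashMap<u32, K>, // ring slot -> key most recently written there
    map: HashMap<K, V>,     // the bounded key -> value storage
}

impl<K: std::hash::Hash + Eq + Clone, V> RingMap<K, V> {
    fn new(capacity: u32) -> Self {
        Self { capacity, index: 0, slots: HashMap::new(), map: HashMap::new() }
    }

    // Once `capacity` entries exist, writing a new one evicts the entry whose
    // key occupies the current ring slot, which is the behaviour the deleted
    // test asserted with its `contains_key` checks.
    fn insert(&mut self, key: K, value: V) {
        if let Some(evicted) = self.slots.insert(self.index, key.clone()) {
            self.map.remove(&evicted);
        }
        self.map.insert(key, value);
        self.index = (self.index + 1) % self.capacity;
    }
}

fn main() {
    let mut buf: RingMap<u32, u32> = RingMap::new(2);
    buf.insert(1, 10);
    buf.insert(2, 20);
    buf.insert(3, 30); // wraps around: evicts key 1, the oldest entry
    assert!(!buf.map.contains_key(&1));
    assert!(buf.map.contains_key(&2) && buf.map.contains_key(&3));
}
```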
- for i in 0..to_be_deleted { - let mut hash = H256::default(); - thread_rng().try_fill(&mut hash.0[..]).unwrap(); - EthereumBeaconClient::store_execution_header( - hash, - CompactExecutionHeader::default(), - (i + execution_header_prune_threshold) as u64, - hash, - ); - - stored_hashes.push(hash); - } - - // We should have only stored up to `execution_header_prune_threshold` - assert_eq!( - ExecutionHeaders::::iter().count() as u32, - execution_header_prune_threshold - ); - - // First `to_be_deleted` items must be deleted - for i in 0..to_be_deleted { - assert!(!ExecutionHeaders::::contains_key(stored_hashes[i as usize])); - } - - // The remaining entries should still be present - for i in to_be_deleted..(to_be_deleted + execution_header_prune_threshold) { - assert!(ExecutionHeaders::::contains_key(stored_hashes[i as usize])); - } - }); -} - #[test] fn compute_fork_version() { let mock_fork_versions = ForkVersions { @@ -348,34 +287,6 @@ fn find_present_keys() { }); } -#[test] -fn cross_check_execution_state() { - new_tester().execute_with(|| { - let header_root: H256 = TEST_HASH.into(); - >::insert( - header_root, - CompactBeaconState { - // set slot to period 5 - slot: ((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) * 5) as u64, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(header_root); - >::set(ExecutionHeaderState { - beacon_block_root: Default::default(), - // set slot to period 2 - beacon_slot: ((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH) * 2) as u64, - block_hash: Default::default(), - block_number: 0, - }); - - assert_err!( - EthereumBeaconClient::cross_check_execution_state(), - Error::::ExecutionHeaderTooFarBehind - ); - }); -} - /* SYNC PROCESS TESTS */ #[test] @@ -608,40 +519,6 @@ fn submit_update_with_skipped_sync_committee_period() { }); } -#[test] -fn submit_update_execution_headers_too_far_behind() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - let next_update = Box::new(load_next_sync_committee_update_fixture()); - - new_tester().execute_with(|| { - let far_ahead_finalized_header_slot = finalized_header_update.finalized_header.slot + - (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * 2) as u64; - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - )); - - let header_root: H256 = TEST_HASH.into(); - >::insert( - header_root, - CompactBeaconState { - slot: far_ahead_finalized_header_slot, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(header_root); - - assert_err!( - EthereumBeaconClient::submit(RuntimeOrigin::signed(1), next_update), - Error::::ExecutionHeaderTooFarBehind - ); - }); -} - #[test] fn submit_irrelevant_update() { let checkpoint = Box::new(load_checkpoint_update_fixture()); @@ -703,187 +580,6 @@ fn submit_update_with_invalid_sync_committee_update() { }); } -#[test] -fn submit_execution_header_update() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - - new_tester().execute_with(|| { -
assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update.clone() - )); - assert!(>::contains_key( - execution_header_update.execution_header.block_hash() - )); - }); -} - -#[test] -fn submit_execution_header_update_invalid_ancestry_proof() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - if let Some(ref mut ancestry_proof) = execution_header_update.ancestry_proof { - ancestry_proof.header_branch[0] = TEST_HASH.into() - } - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::InvalidAncestryMerkleProof - ); - }); -} - -#[test] -fn submit_execution_header_update_invalid_execution_header_proof() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.execution_branch[0] = TEST_HASH.into(); - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::InvalidExecutionHeaderProof - ); - }); -} - -#[test] -fn submit_execution_header_update_that_skips_block() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); - let mut skipped_block_execution_header_update = - Box::new(load_execution_header_update_fixture()); - let mut skipped_execution_header = - skipped_block_execution_header_update.execution_header.clone(); - - skipped_execution_header = match skipped_execution_header { - VersionedExecutionPayloadHeader::Capella(execution_payload_header) => { - let mut mut_execution_payload_header = execution_payload_header.clone(); - mut_execution_payload_header.block_number = execution_payload_header.block_number + 2; - VersionedExecutionPayloadHeader::Capella(mut_execution_payload_header) - }, - VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => { - let mut mut_execution_payload_header = execution_payload_header.clone(); - mut_execution_payload_header.block_number = execution_payload_header.block_number + 2; - VersionedExecutionPayloadHeader::Deneb(mut_execution_payload_header) - }, - }; - - skipped_block_execution_header_update.execution_header = skipped_execution_header; - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_ok!(EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), 
- execution_header_update.clone() - )); - assert!(>::contains_key( - execution_header_update.execution_header.block_hash() - )); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - skipped_block_execution_header_update - ), - Error::::ExecutionHeaderSkippedBlock - ); - }); -} - -#[test] -fn submit_execution_header_update_that_is_also_finalized_header_which_is_not_stored() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.ancestry_proof = None; - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::ExpectedFinalizedHeaderNotStored - ); - }); -} - -#[test] -fn submit_execution_header_update_that_is_also_finalized_header_which_is_stored_but_slots_dont_match( -) { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let mut execution_header_update = Box::new(load_execution_header_update_fixture()); - execution_header_update.ancestry_proof = None; - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - - let block_root: H256 = execution_header_update.header.hash_tree_root().unwrap(); - - >::insert( - block_root, - CompactBeaconState { - slot: execution_header_update.header.slot + 1, - block_roots_root: Default::default(), - }, - ); - LatestFinalizedBlockRoot::::set(block_root); - - assert_err!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::ExpectedFinalizedHeaderNotStored - ); - }); -} - -#[test] -fn submit_execution_header_not_finalized() { - let checkpoint = Box::new(load_checkpoint_update_fixture()); - let finalized_header_update = Box::new(load_finalized_header_update_fixture()); - let update = Box::new(load_execution_header_update_fixture()); - - new_tester().execute_with(|| { - assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); - assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); - - >::mutate(>::get(), |x| { - let prev = x.unwrap(); - *x = Some(CompactBeaconState { slot: update.header.slot - 1, ..prev }); - }); - - assert_err!( - EthereumBeaconClient::submit_execution_header(RuntimeOrigin::signed(1), update), - Error::::HeaderNotFinalized - ); - }); -} - /// Check that a gap of more than 8192 slots between finalized headers is not allowed. 
#[test] fn submit_finalized_header_update_with_too_large_gap() { @@ -943,37 +639,21 @@ fn submit_finalized_header_update_with_gap_at_limit() { #[test] fn verify_message() { - let header = get_message_verification_header(); let (event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_ok!(EthereumBeaconClient::verify(&event_log, &proof)); }); } -#[test] -fn verify_message_missing_header() { - let (event_log, proof) = get_message_verification_payload(); - - new_tester().execute_with(|| { - assert_err!( - EthereumBeaconClient::verify(&event_log, &proof), - VerificationError::HeaderNotFound - ); - }); -} - #[test] fn verify_message_invalid_proof() { - let header = get_message_verification_header(); let (event_log, mut proof) = get_message_verification_payload(); - proof.data.1[0] = TEST_HASH.into(); - let block_hash = proof.block_hash; + proof.receipt_proof.1[0] = TEST_HASH.into(); new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::InvalidProof @@ -983,29 +663,28 @@ fn verify_message_invalid_proof() { #[test] fn verify_message_invalid_receipts_root() { - let mut header = get_message_verification_header(); - let (event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; - header.receipts_root = TEST_HASH.into(); + let (event_log, mut proof) = get_message_verification_payload(); + let mut payload = deneb::ExecutionPayloadHeader::default(); + payload.receipts_root = TEST_HASH.into(); + proof.execution_proof.execution_header = VersionedExecutionPayloadHeader::Deneb(payload); new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), - VerificationError::InvalidProof + VerificationError::InvalidExecutionProof( + Error::::BlockBodyHashTreeRootFailed.into() + ) ); }); } #[test] fn verify_message_invalid_log() { - let header = get_message_verification_header(); let (mut event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; event_log.topics = vec![H256::zero(); 10]; - new_tester().execute_with(|| { - >::insert(block_hash, header); + assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::InvalidLog @@ -1015,13 +694,11 @@ fn verify_message_invalid_log() { #[test] fn verify_message_receipt_does_not_contain_log() { - let header = get_message_verification_header(); let (mut event_log, proof) = get_message_verification_payload(); - let block_hash = proof.block_hash; event_log.data = hex!("f9013c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000002b8c000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000068000f000000000000000101d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec70100000101001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c0000e8890423c78a0000000000000000000000000000000000000000000000000000000000000000").to_vec(); new_tester().execute_with(|| { - >::insert(block_hash, header); + 
assert_ok!(initialize_storage()); assert_err!( EthereumBeaconClient::verify(&event_log, &proof), VerificationError::LogNotFound @@ -1033,7 +710,6 @@ fn verify_message_receipt_does_not_contain_log() { fn set_operating_mode() { let checkpoint = Box::new(load_checkpoint_update_fixture()); let update = Box::new(load_finalized_header_update_fixture()); - let execution_header_update = Box::new(load_execution_header_update_fixture()); new_tester().execute_with(|| { assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); @@ -1047,14 +723,6 @@ fn set_operating_mode() { EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update), Error::::Halted ); - - assert_noop!( - EthereumBeaconClient::submit_execution_header( - RuntimeOrigin::signed(1), - execution_header_update - ), - Error::::Halted - ); }); } @@ -1070,3 +738,107 @@ fn set_operating_mode_root_only() { ); }); } + +#[test] +fn verify_execution_proof_invalid_ancestry_proof() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + if let Some(ref mut ancestry_proof) = execution_header_update.ancestry_proof { + ancestry_proof.header_branch[0] = TEST_HASH.into() + } + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::InvalidAncestryMerkleProof + ); + }); +} + +#[test] +fn verify_execution_proof_invalid_execution_header_proof() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.execution_branch[0] = TEST_HASH.into(); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::InvalidExecutionHeaderProof + ); + }); +} + +#[test] +fn verify_execution_proof_that_is_also_finalized_header_which_is_not_stored() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.ancestry_proof = None; + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::ExpectedFinalizedHeaderNotStored + ); + }); +} + +#[test] +fn submit_execution_proof_that_is_also_finalized_header_which_is_stored_but_slots_dont_match() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let mut execution_header_update = Box::new(load_execution_proof_fixture()); + execution_header_update.ancestry_proof = None; + + new_tester().execute_with(|| { + 
assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + + let block_root: H256 = execution_header_update.header.hash_tree_root().unwrap(); + + >::insert( + block_root, + CompactBeaconState { + slot: execution_header_update.header.slot + 1, + block_roots_root: Default::default(), + }, + ); + LatestFinalizedBlockRoot::::set(block_root); + + assert_err!( + EthereumBeaconClient::verify_execution_proof(&execution_header_update), + Error::::ExpectedFinalizedHeaderNotStored + ); + }); +} + +#[test] +fn verify_execution_proof_not_finalized() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let finalized_header_update = Box::new(load_finalized_header_update_fixture()); + let update = Box::new(load_execution_proof_fixture()); + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), finalized_header_update)); + + >::mutate(>::get(), |x| { + let prev = x.unwrap(); + *x = Some(CompactBeaconState { slot: update.header.slot - 1, ..prev }); + }); + + assert_err!( + EthereumBeaconClient::verify_execution_proof(&update), + Error::::HeaderNotFinalized + ); + }); +} diff --git a/bridges/snowbridge/pallets/ethereum-client/src/types.rs b/bridges/snowbridge/pallets/ethereum-client/src/types.rs index 5dcefea9f80f4e201d8de633a7a323f530220a45..8808f989754b240d207101cbeff6fe25fe74279a 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/types.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/types.rs @@ -15,17 +15,7 @@ pub type CheckpointUpdate = primitives::CheckpointUpdate; pub type Update = primitives::Update; pub type NextSyncCommitteeUpdate = primitives::NextSyncCommitteeUpdate; -pub use primitives::ExecutionHeaderUpdate; - -/// ExecutionHeader ring buffer implementation -pub type ExecutionHeaderBuffer = RingBufferMapImpl< - u32, - ::MaxExecutionHeadersToKeep, - crate::ExecutionHeaderIndex, - crate::ExecutionHeaderMapping, - crate::ExecutionHeaders, - OptionQuery, ->; +pub use primitives::{AncestryProof, ExecutionProof}; /// FinalizedState ring buffer implementation pub(crate) type FinalizedBeaconStateBuffer = RingBufferMapImpl< diff --git a/bridges/snowbridge/pallets/ethereum-client/src/weights.rs b/bridges/snowbridge/pallets/ethereum-client/src/weights.rs index e1a5578f46615e6a75400631ea7d0cc00a0d90cb..e4629746aa2d9f8afe31e6af037b71a3c6f9b2bb 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/weights.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/weights.rs @@ -36,7 +36,6 @@ pub trait WeightInfo { fn force_checkpoint() -> Weight; fn submit() -> Weight; fn submit_with_sync_committee() -> Weight; - fn submit_execution_header() -> Weight; } // For backwards compatibility and tests @@ -59,10 +58,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(6)) .saturating_add(RocksDbWeight::get().writes(1)) } - fn submit_execution_header() -> Weight { - Weight::from_parts(113_158_000_u64, 0) - .saturating_add(Weight::from_parts(0, 3537)) - .saturating_add(RocksDbWeight::get().reads(5)) - .saturating_add(RocksDbWeight::get().writes(4)) - } } diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json deleted file mode 100755 index 
319014249c12d9de36ee9e73e08cdeabcd954f63..0000000000000000000000000000000000000000 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-header-update.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "header": { - "slot": 215, - "proposer_index": 2, - "parent_root": "0x97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45", - "state_root": "0xb088b5a3a8c90d6dc919a695cd7bb0267c6f983ea2e675c559ceb8f46cb90b67", - "body_root": "0x0ba23c8224fdd01531d5ad51486353bd524a0b4c20bca704e26d3210616f829b" - }, - "ancestry_proof": { - "header_branch": [ - "0x97518f531a252bb6ca547b21aca9da767943ec99211d3b15c804e34c3a523f45", - "0x5ce0db996bd499c2b4f7a93263d5aafd052f420efb617cce6fdd54e25516aa45", - "0x84f0e373b66011ce774c7061440c0a50a51cce2b4b335395eee3e563d605597f", - "0x48f9ccc5f9594142c18c3b5c39a99f0549329c6ab3ba06c9a50030eadca87770", - "0xf89d6e311e05bc75a6f63ce118bccce254551f1a88d54c3b4f773f81f946bd99", - "0x2edd6d893c22636675147c07dfcdb541a146e87c3f15b51c388be4868246dc9b", - "0xd76b7de5f856e3208a91a42c9c398a7f4fab35e667bf916346050ae742514a2d", - "0x83a2e233e76385953ca41de4c3afe60471a61f0cc1b3846b4a0670e3e563b747", - "0xe783a5a109c2ad74e4eb53e8f6b11b31266a92a9e16c1fd5873109c5d41b282c", - "0xd4ea1ef3869ee6a0fd0b19d7d70027d144eecd4f1d32cbf47632a0a9069164b9", - "0xf8179564b58eb93a850d35e4156a04db651106442ad891c3e85155c1762792f1", - "0x4cbb1edb48cf1e32fb30db60aaaeaf6190ffe4d0c8dbc96cec307daecb78be12", - "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" - ], - "finalized_block_root": "0x890a7f23b9ed2160901654be9efc575d6830ca860e2a97866ae3423fb7bd7231" - }, - "execution_header": { - "Deneb": { - "parent_hash": "0xd82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a", - "fee_recipient": "0x0000000000000000000000000000000000000000", - "state_root": "0x8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3", - "receipts_root": "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095", - "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010", - "prev_randao": "0x6d9e2a012d82b1b6cb0a2c1c1ed24cc16dbb56e6e39ae545371e0666ab057862", - "block_number": 215, - "gas_limit": 64842908, - "gas_used": 119301, - "timestamp": 1705859527, - "extra_data": "0xd983010d0a846765746888676f312e32312e358664617277696e", - "base_fee_per_gas": 7, - "block_hash": "0x48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98", - "transactions_root": "0x5ebc1347fe3df0611d4f66b19bd8e1c6f4eaed0371d850f14c83b1c77ea234e6", - "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", - "blob_gas_used": 0, - "excess_blob_gas": 0 - } - }, - "execution_branch": [ - "0xf8c69d3830406d668619bcccc13c8dddde41e863326f7418b241d5924c4ad34a", - "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", - "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0xf4d6b5cf9c6e212615c3674fa625d04eb1114153fb221ef5ad02aa433fc67cfb" - ] -} \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json 
b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json new file mode 100755 index 0000000000000000000000000000000000000000..f55898087dfec122d84f699ee75cfe9aadbb4d6d --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/execution-proof.json @@ -0,0 +1,54 @@ +{ + "header": { + "slot": 393, + "proposer_index": 4, + "parent_root": "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "state_root": "0xb62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434", + "body_root": "0x04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db" + }, + "ancestry_proof": { + "header_branch": [ + "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "0xfa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3", + "0xcadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d", + "0x33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c", + "0x2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf", + "0xe1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1", + "0xaa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97", + "0x160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f", + "0xf68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535", + "0x1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc", + "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" + ], + "finalized_block_root": "0x751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46" + }, + "execution_header": { + "Deneb": { + "parent_hash": "0x8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b", + "receipts_root": "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010", + "prev_randao": "0x62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67", + "block_number": 393, + "gas_limit": 54492273, + "gas_used": 199644, + "timestamp": 1710552813, + "extra_data": "0xd983010d0b846765746888676f312e32312e368664617277696e", + "base_fee_per_gas": 7, + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "transactions_root": "0x2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d", + "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", + "blob_gas_used": 0, + "excess_blob_gas": 0 + } + }, + "execution_branch": [ + "0xa6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d", + "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", + "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0xd3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da" + ] +} \ No newline at end of 
file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json index f9d5324d57b15fc657876b46bbf4a8e716d4c30c..2dec5cc56fac60b924053573f5962c02b487b24b 100755 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/finalized-header-update.json @@ -1,38 +1,40 @@ { "attested_header": { - "slot": 2566, - "proposer_index": 6, - "parent_root": "0x6eb9f13a2c496318ce1ab3087bbd872f5c9519a1a7ca8231a2453e3cb523af00", - "state_root": "0xc8cb12766113dff7e46d2917267bf33d0626d99dd47715fcdbc5c65fad3c04b4", - "body_root": "0xd8cfd0d7bc9bc3724417a1655bb0a67c0765ca36197320f4d834150b52ef1420" + "slot": 933, + "proposer_index": 1, + "parent_root": "0xf5fc63e2780ca302b97aea73fc95d74d702b5afe9a772c2b68f695026337b620", + "state_root": "0xd856d11636bc4d866e78be9e747b222b0977556a367ab42e4085277301438050", + "body_root": "0x5689091ab4eb76c2e876271add4924e1c66ce987c300c24aac2ad8c703e9a33f" }, "sync_aggregate": { "sync_committee_bits": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "sync_committee_signature": "0x9296f9a0387f2cac47008e22ad7c3cd3d49d35384c13e6aa1eacca7dca7c3d2ca81515e50eb3396b9550ed20ef7d8fa2049a186598feb2c00e93728045fcff917733d1827481b8fc95f3913e27fc70112c2490496eb57bb7181f02c3f9fd471f" + "sync_committee_signature": "0x93a3d482fe2a2f7fd2b634169752a8fddf1dc28b23a020b398be8526faf37a74ca0f6db1bed78a9c7256c09a6115235e108e0e8a7ce09287317b0856c4b77dfa5adba6cf4c3ebea5bfa4cd2fcde80fd0a532f2defe65d530201d5d2258796559" }, - "signature_slot": 2567, + "signature_slot": 934, "next_sync_committee_update": null, "finalized_header": { - "slot": 2496, - "proposer_index": 2, - "parent_root": "0xc99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622", - "state_root": "0xfbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde", - "body_root": "0xa2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0" + "slot": 864, + "proposer_index": 4, + "parent_root": "0x614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614", + "state_root": "0x5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a", + "body_root": "0x0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e" }, "finality_branch": [ - "0x4e00000000000000000000000000000000000000000000000000000000000000", + "0x1b00000000000000000000000000000000000000000000000000000000000000", "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", - "0x958b8e43347f6df6fa5eb3d62d06a862381a6585aa40640dd1c0de11f1cf89c1", - "0xf107dce04faa86a28fc5d4a618be9cb8d4fc3c23d6c42c3624f3ff4bf6586a03", - "0xa501cdc02e86969ac3e4d0c5a36f4f049efaa1ab8cb6693f51d130eb52a80f30" + "0xf12d9aededc72724e417b518fe6f847684f26f81616243dedf8c551cc7d504f5", + "0x89a85d0907ab3fd6e00ae385f61d456c6191646404ae7b8d23d0e60440cf4d00", + "0x9fc943b6020eb61d780d78bcc6f6102a81d2c868d58f36e61c6e286a2dc4d8c2" ], - "block_roots_root": "0xd160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe", + "block_roots_root": "0xb9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10", "block_roots_branch": [ - "0x105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135", - "0x9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99", - 
"0xecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5", - "0xb2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2", - "0xcd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf" - ] + "0x733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f", + "0x9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa", + "0xbcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf", + "0x3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5", + "0xc2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4" + ], + "execution_header": null, + "execution_branch": null } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json index 5aa5a59f023761e248fd23ec3b29de19092e5e90..6589dca5fb456549d201b00402b2b2fa375d7b49 100644 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/inbound-message.json @@ -1,31 +1,79 @@ { - "execution_header": { - "parent_hash": "0xd82ec63f5c5e6ba61d62f09c188f158e6449b94bdcc31941e68639eec3c4cf7a", - "state_root": "0x8b65545fe5f3216b47b6339b9c91ca2b7f1032a970b04246d9e9fb4460ee34c3", - "receipts_root": "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095", - "block_number": 215 + "event_log": { + "address": "0xeda338e4dc46038493b885327842fd3e301cab39", + "topics": [ + "0x7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f", + "0xc173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539", + "0x5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" }, - "message": { - "event_log": { - "address": "0xeda338e4dc46038493b885327842fd3e301cab39", - "topics": [ - "0x7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f", - "0xc173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539", - "0x5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0" + "proof": { + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "tx_index": 0, + "receipt_proof": { + "keys": [ + "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "0x4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f" ], - "data": "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" + "values": [ + "0xf851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080", + 
"0xf9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" + ] }, - "Proof": { - "block_hash": "0x48498dbfbcfae53a7f4c289ee00747aceea925f6260c50ead5a33e1c55c40f98", - "tx_index": 0, - "data": { - "keys": [ - "0x7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095" + "execution_proof": { + "header": { + "slot": 393, + "proposer_index": 4, + "parent_root": "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "state_root": "0xb62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434", + "body_root": "0x04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db" + }, + "ancestry_proof": { + "header_branch": [ + "0x6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef", + "0xfa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3", + "0xcadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d", + "0x33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c", + "0x2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf", + "0xe1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1", + "0xaa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97", + "0x160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f", + "0xf68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535", + "0x1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc", + "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f" ], - "values": [ - 
"0xf9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000" - ] - } + "finalized_block_root": "0x751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46" + }, + "execution_header": { + "Deneb": { + "parent_hash": "0x8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "state_root": "0x96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b", + "receipts_root": "0xdccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284", + "logs_bloom": "0x00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010", + "prev_randao": "0x62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67", + "block_number": 393, + "gas_limit": 54492273, + "gas_used": 199644, + "timestamp": 1710552813, + "extra_data": "0xd983010d0b846765746888676f312e32312e368664617277696e", + "base_fee_per_gas": 7, + "block_hash": "0x6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131", + "transactions_root": "0x2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d", + "withdrawals_root": "0x792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535", + "blob_gas_used": 0, + "excess_blob_gas": 0 + } + }, + "execution_branch": [ + "0xa6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d", + "0xb46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb", + "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0xd3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da" + ] } } } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json index 202790c1db5b53068c7aedf561dc7972cb863f00..a62d646617e49ee11c1d68e401069c5bdf4368be 
100755 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json @@ -1,10 +1,10 @@ { "header": { - "slot": 2496, - "proposer_index": 2, - "parent_root": "0xc99e49787106733eeebab4d93eb326e1f2214575c9d928f0c4ab0da0776f1622", - "state_root": "0xfbf8a08c86ef36bd173e37e733da4a78aa8e85fee99a990e858dd12a59087fde", - "body_root": "0xa2a8ad06901447b2807a9059580a4c40d8a941f325b1343c69f7c7c6c90e4ab0" + "slot": 864, + "proposer_index": 4, + "parent_root": "0x614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614", + "state_root": "0x5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a", + "body_root": "0x0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e" }, "current_sync_committee": { "pubkeys": [ @@ -525,18 +525,18 @@ }, "current_sync_committee_branch": [ "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", - "0x93880225bf99a0c5ec22b266ff829837754e9c5edf37a68c05b8f803fd82fa45", - "0x4c60656ec9a95fcf11030ad309c716b5b15beb7f60a0bcfc7c9d4eff505472ff", - "0x22d1645fceb4bf9a695043dda19a53e784ec70df6a6b1bd66ea30eba1cca5f2f", - "0xa8fc6cad84ceefc633ec56c2d031d525e1cb4b51c70eb252919fce5bba9a1fde" + "0xa9e90f89e7f90fd5d79a6bbcaf40ba5cfc05ab1b561ac51c84867c32248d5b1e", + "0xbd1a76b03e02402bb24a627de1980a80ab17691980271f597b844b89b497ef75", + "0x07bbcd27c7cad089023db046eda17e8209842b7d97add8b873519e84fe6480e7", + "0x94c11eeee4cb6192bf40810f23486d8c75dfbc2b6f28d988d6f74435ede243b0" ], "validators_root": "0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69", - "block_roots_root": "0xd160b7687041891b73e54b06fc4e04f82d0fa8fdd76705895e216c6b24709dfe", + "block_roots_root": "0xb9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10", "block_roots_branch": [ - "0x105290e42d98ab6a0ada6e55453cede36c672abf645eeb986b88d7487616e135", - "0x9da41f274bcdf6122335443d9ce94d07163b48dba3e2f9499ff56f4e48b48b99", - "0xecea7e1d3152d8130e83afdfe34b4de4ba2b69a33c9471991096daf454de9cf5", - "0xb2bf1758e50b2bfff29169fbc70fdb884b2b05bb615dbc53567574da6f4f1ae2", - "0xcd87069daf70975779126d6af833b7d636c75ca4d5e750ebcad0e76408a5e5bf" + "0x733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f", + "0x9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa", + "0xbcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf", + "0x3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5", + "0xc2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4" ] } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json index 6bf20355c7a34ea3b1a9949f39cabc363a029590..4d601d7d8f0bbc0a3eda4d3afa927c076f290301 100755 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update.json @@ -2,13 +2,13 @@ "attested_header": { "slot": 129, "proposer_index": 5, - "parent_root": "0xe32b6c18f029e755b0273dc1c4fa2bc4979794c8286ad40276c1b8a8e36049d8", - "state_root": "0x5ec9dacf25a5f09f20be0c59246b3d8dcfe64bd085b4bac5cec180690339801e", - "body_root": "0x4080cf2412d6ff77fc3164ad6155423a7112f207f173145ec16371a93f481f87" + "parent_root": "0xc2def03fe44a2802130ca1a6d8406e4ccf4f344fec7075d4d84431cd4a8b0904", + "state_root": 
"0xfa62cde6666add7353d7aedcb61ebe3c6c84b5361e34f814825b1250affb5be4", + "body_root": "0x0f9c69f243fe7b5fa5860396c66c720a9e8b1e526e7914188930497cc4a9134c" }, "sync_aggregate": { "sync_committee_bits": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "sync_committee_signature": "0xa761c3333fbb3d36bc8f65454f898da38001499dcd37494cf3d86940a995399ae649216ba4c985af154f83f72c8b1856079b7636a7a8d7d3f7602df2cbf699edb72b65253e82de4d9cc4db7377eafb22f799129f63f094a21c00675bdd5cc243" + "sync_committee_signature": "0x810cfde2afea3e276256c09bdf1cd321c33dcadeefddcfd24f488e6f756d917cfda90b5b437b3a4b4ef880985afa28a40cf565ec0a82877ddee36adc01d55d9d4a911ae3e22556e4c2636f1c707366fba019fb49450440fcd263d0b054b04bf0" }, "signature_slot": 130, "next_sync_committee_update": { @@ -531,33 +531,35 @@ }, "next_sync_committee_branch": [ "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", - "0xfd1e5ff5d4a15081efe3ff17857b1f95984c9a271b1c41c2f81f43e60c2cc541", - "0xe1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d", - "0x77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61", - "0xe97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68" + "0x43276bee17fc9fba3f4866e902f0e5b5b308d79db91154bb8bf819973837a7d9", + "0x5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd", + "0x2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221", + "0x7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f" ] }, "finalized_header": { "slot": 64, "proposer_index": 4, - "parent_root": "0x0f7bc2353778c14c7f6dba0fc5fe6eec87228b0d3a5447b61dce67b4d9338de3", - "state_root": "0xfeb990de653ce494c0a263f820eaf05a9300dbdc30cb6065ede602827bfccde4", - "body_root": "0xf5235cd8c24f2695fc5b7989926305c10ad8cf5a87d62a739f675f5543df2ec1" + "parent_root": "0xa876486aaad7ddb897f369fd22d0a9903cd61d00c9e0dfe7998dd68d1008c678", + "state_root": "0x818e21c3388575f8ccc9ff17ec79d5a57915bcd31bccf47770f65a18e068416b", + "body_root": "0x1d1f73b864b3bb7e11ff91b56ca1381e0f9ca8122b2c542db88243604c763019" }, "finality_branch": [ "0x0200000000000000000000000000000000000000000000000000000000000000", "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", - "0xe1c97f93bb7352d395d1ff8ee29881572cb7eb5d71634783701171dcd30cd93d", - "0x77fa2170ddbd89b15dae02f2e6cf9f76c8e00d1c4217320acffbe01576d0da61", - "0xe97288e0627219087a024078d69445f34f0583a6350a7c3c40c39fd1fa6f8d68" + "0x5572348e13ce59446ca0ea7cfeed07579da05f121920a76559e19bda94dd81cd", + "0x2d58adca9f3c742530de037f1933d6de1920ea4b68581613d4bc32b71547f221", + "0x7072b3c6577cd5a89b3234968f316f54630bb97eafbdb59e5b61637a9640255f" ], - "block_roots_root": "0x6fcdfd1c3fb1bdd421fe59dddfff3855b5ed5e30373887991a0059d019ad12bc", + "block_roots_root": "0x715b08694bef183a6d94b3113d16a7129f89fc3edec85a7e0eaf6ef9153552ef", "block_roots_branch": [ - "0x94b59531f172bc24f914bc0c10104ccb158676850f8cc3b47b6ddb7f096ebdd7", - "0x22470ed9155a938587d44d5fa19217c0f939d8862e504e67cd8cb4d1b960795e", - "0xfeec3ef1a68f93849e71e84f90b99602cccc31868137b6887ca8244a4b979e8e", + "0x4028c72c71b6ce80ea7d18b2c9471f4e4fa39746261a9921e832a4a2f9bdf7bb", + "0x75f98062661785d3290b7bd998b64446582baa49210733fd4603e1a97cd45a44", + "0x6fb757f44052f30c464810f01b0132adfa1a5446d8715b41e9af88eee1ee3e65", "0x5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82", - 
"0xf5ff4b0c6190005015889879568f5f0d9c40134c7ec4ffdda47950dcd92395ad" - ] + "0xf2b3cb56753939a728ccad399a434ca490f018f2f331529ec0d8b2d59c509271" + ], + "execution_header": null, + "execution_branch": null } \ No newline at end of file diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index b850496cd4e14cd906565d488450b339a29f463f..9fc1f31fbf7c0d0e76c67ae126fddce66ca82b83 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -42,7 +42,7 @@ snowbridge-core = { path = "../../primitives/core", default-features = false } snowbridge-ethereum = { path = "../../primitives/ethereum", default-features = false } snowbridge-router-primitives = { path = "../../primitives/router", default-features = false } snowbridge-beacon-primitives = { path = "../../primitives/beacon", default-features = false } -snowbridge-pallet-inbound-queue-fixtures = { path = "./fixtures", default-features = false, optional = true } +snowbridge-pallet-inbound-queue-fixtures = { path = "fixtures", default-features = false, optional = true } [dev-dependencies] frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs index 4f3445b2905364147d3974988fee0fad2f387d07..00adcdfa186adc755273b9753d833a6bdbec5b56 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs @@ -2,17 +2,6 @@ // SPDX-FileCopyrightText: 2023 Snowfork #![cfg_attr(not(feature = "std"), no_std)] -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::Message; -use sp_core::RuntimeDebug; - pub mod register_token; -pub mod register_token_with_insufficient_fee; pub mod send_token; pub mod send_token_to_penpal; - -#[derive(Clone, RuntimeDebug)] -pub struct InboundQueueFixture { - pub execution_header: CompactExecutionHeader, - pub message: Message, -} diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs index b8d510e6b13d02420191591ff917eb0366f818d6..340b2fadfacfd9bdb0c291323cbd69cd71c00b7c 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token.rs @@ -3,20 +3,16 @@ // Generated, do not edit! 
// See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; use sp_std::vec; pub fn make_register_token_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("d5de3dd02c96dbdc8aaa4db70a1e9fdab5ded5f4d52f18798acd56a3d37d1ad6").into(), - block_number: 772, - state_root: hex!("49cba2a79b23ad74cefe80c3a96699825d1cda0f75bfceb587c5549211c86245").into(), - receipts_root: hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), @@ -28,14 +24,74 @@ pub fn make_register_token_message() -> InboundQueueFixture { data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("392182a385b3a417e8ddea8b252953ee81e6ec0fb09d9056c96c89fbeb703a3f").into(), - tx_index: 0, - data: (vec![ - hex!("7b1f61b9714c080ef0be014e01657a15f45f0304b477beebc7ca5596c8033095").to_vec(), + receipt_proof: (vec![ + hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").to_vec(), + hex!("4a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f").to_vec(), ], vec![ - hex!("f9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), + hex!("f851a09c01dd6d2d8de951c45af23d3ad00829ce021c04d6c8acbe1612d456ee320d4980808080808080a04a98e45a319168b0fc6005ce6b744ee9bf54338e2c0784b976a8578d241ced0f8080808080808080").to_vec(), + 
hex!("f9028c30b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7d00e40b54020000000000000000000000000000000000000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 393, + proposer_index: 4, + parent_root: hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + state_root: hex!("b62ac34a8cb82497be9542fe2114410c9f6021855b766015406101a1f3d86434").into(), + body_root: hex!("04005fe231e11a5b7b1580cb73b177ae8b338bedd745497e6bb7122126a806db").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("6545b47a614a1dd4cad042a0cdbbf5be347e8ffcdc02c6c64540d5153acebeef").into(), + hex!("fa84cc88ca53a72181599ff4eb07d8b444bce023fe2347c3b4f51004c43439d3").into(), + hex!("cadc8ae211c6f2221c9138e829249adf902419c78eb4727a150baa4d9a02cc9d").into(), + hex!("33a89962df08a35c52bd7e1d887cd71fa7803e68787d05c714036f6edf75947c").into(), + hex!("2c9760fce5c2829ef3f25595a703c21eb22d0186ce223295556ed5da663a82cf").into(), + hex!("e1aa87654db79c8a0ecd6c89726bb662fcb1684badaef5cd5256f479e3c622e1").into(), + hex!("aa70d5f314e4a1fbb9c362f3db79b21bf68b328887248651fbd29fc501d0ca97").into(), + hex!("160b6c235b3a1ed4ef5f80b03ee1c76f7bf3f591c92fca9d8663e9221b9f9f0f").into(), + hex!("f68d7dcd6a07a18e9de7b5d2aa1980eb962e11d7dcb584c96e81a7635c8d2535").into(), + hex!("1d5f912dfd6697110dd1ecb5cb8e77952eef57d85deb373572572df62bb157fc").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("751414cd97c0624f922b3e80285e9f776b08fa22fd5f87391f2ed7ef571a8d46").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("8092290aa21b7751576440f77edd02a94058429ce50e63a92d620951fb25eda2").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("96a83e9ddf745346fafcb0b03d57314623df669ed543c110662b21302a0fae8b").into(), + receipts_root: hex!("dccdfceea05036f7b61dcdabadc937945d31e68a8d3dfd4dc85684457988c284").into(), + logs_bloom: 
hex!("00000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000400000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000080000000000000000000000000000040004000000000000002002002000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000080000000000000000000000000000000000100000000000000000200000200000010").into(), + prev_randao: hex!("62e309d4f5119d1f5c783abc20fc1a549efbab546d8d0b25ff1cfd58be524e67").into(), + block_number: 393, + gas_limit: 54492273, + gas_used: 199644, + timestamp: 1710552813, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("6a9810efb9581d30c1a5c9074f27c68ea779a8c1ae31c213241df16225f4e131").into(), + transactions_root: hex!("2cfa6ed7327e8807c7973516c5c32a68ef2459e586e8067e113d081c3bd8c07d").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("a6833fa629f3286b6916c6e50b8bf089fc9126bee6f64d0413b4e59c1265834d").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("d3af7c05c516726be7505239e0b9c7cb53d24abce6b91cdb3b3995f0164a75da").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 864, + proposer_index: 4, + parent_root: hex!("614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614").into(), + state_root: hex!("5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a").into(), + body_root: hex!("0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e").into(), + }, + block_roots_root: hex!("b9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs deleted file mode 100644 index 82ff2283101e331395b06dd14f3876076e71722e..0000000000000000000000000000000000000000 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/register_token_with_insufficient_fee.rs +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// SPDX-FileCopyrightText: 2023 Snowfork -// Generated, do not edit! 
-// See ethereum client README.md for instructions to generate - -use crate::InboundQueueFixture; -use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; -use sp_std::vec; - -pub fn make_register_token_with_infufficient_fee_message() -> InboundQueueFixture { - InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("998e81dc6df788a920b67e058fbde0dc3f4ec6f11f3f7cd8c3148e6d99584885").into(), - block_number: 338, - state_root: hex!("30ef9c9db2609de19bbc6c3cbeddac889e82bbcb2db20304b3abdfbdc7134cbf").into(), - receipts_root: hex!("969335c3132a007cb8b5886a3c23dd8da63cba04aeda29857a86ee1c13dae782").into(), - }, - message: Message { - event_log: Log { - address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), - topics: vec![ - hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), - hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(), - ], - // insufficient xcm fee as only 1000(hex:e803) - data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7de8030000000000000000000000000000000000000000000000000000000000000000").into(), - }, - proof: Proof { - block_hash: hex!("5976f37f0e331d194eb331df74355ef47565c3a1bd11c95a45b681f6917085c1").into(), - tx_index: 0, - data: (vec![ - hex!("969335c3132a007cb8b5886a3c23dd8da63cba04aeda29857a86ee1c13dae782").to_vec(), - ], vec![ - hex!("f9028e822080b9028802f90284018301d205b9010000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000080000000000000000000000000000004000000000080000000000000000000000000000000000010100000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000040004000000000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000200000000000010f90179f85894eda338e4dc46038493b885327842fd3e301cab39e1a0f78bb28d4b1d7da699e5c0bc2be29c2b04b5aab6aacf6298fe5304f9db9c6d7ea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7df9011c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8a000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002e00a736aa00000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7de8030000000000000000000000000000000000000000000000000000000000000000").to_vec(), - ]), - }, - }, - } -} diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs index 2562217100eaf7cd0c5dff420c55826b277d3570..4075febab59d596b94c74a140d0176c25d3a1991 100755 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token.rs @@ -3,20 +3,16 @@ // Generated, do not edit! 
// See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; use sp_std::vec; pub fn make_send_token_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("920cecde45d428e3a77590b70f8533cf4c2c36917b8a7b74c915e7fa3dae7075").into(), - block_number: 1148, - state_root: hex!("bbc6ba0e9940d641afecbbaf3f97abd2b9ffaf2f6bd4879c4a71e659eca89978").into(), - receipts_root: hex!("9f3340b57eddc1f86de30776db57faeca80269a3dd459031741988dec240ce34").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), @@ -28,14 +24,72 @@ pub fn make_send_token_message() -> InboundQueueFixture { data: hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("d3c155f123c3cbff22f3d7869283e02179edea9ffa7a5e9a4d8414c2a6b8991f").into(), - tx_index: 0, - data: (vec![ - hex!("9f3340b57eddc1f86de30776db57faeca80269a3dd459031741988dec240ce34").to_vec(), + receipt_proof: (vec![ + hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").to_vec(), ], vec![ - 
hex!("f90451822080b9044b02f90447018301bcb9b9010000800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000f9033cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").to_vec(), + 
hex!("f90451822080b9044b02f90447018301bcb6b9010000800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000f9033cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 2321, + proposer_index: 5, + parent_root: hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), + state_root: hex!("d962981467920bb2b7efa4a7a1baf64745582c3250857f49a957c5dae9a0da39").into(), + body_root: hex!("18e3f7f51a350f371ad35d166f2683b42af51d1836b295e4093be08acb0dcb7a").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("2add14727840d3a5ea061e14baa47030bb81380a65999200d119e73b86411d20").into(), + hex!("48b2e2f5256906a564e5058698f70e3406765fefd6a2edc064bb5fb88aa2ed0a").into(), + hex!("e5ed7c704e845418219b2fda42cd2f3438ffbe4c4b320935ae49439c6189f7a7").into(), + hex!("4a7ce24526b3f571548ad69679e4e260653a1b3b911a344e7f988f25a5c917a7").into(), + hex!("46fc859727ab0d0e8c344011f7d7a4426ccb537bb51363397e56cc7153f56391").into(), + hex!("f496b6f85a7c6c28a9048f2153550a7c5bcb4b23844ed3b87f6baa646124d8a3").into(), + hex!("7318644e474beb46e595a1875acc7444b937f5208065241911d2a71ac50c2de3").into(), + hex!("5cf48519e518ac64286aef5391319782dd38831d5dcc960578a6b9746d5f8cee").into(), + hex!("efb3e50fa39ca9fe7f76adbfa36fa8451ec2fd5d07b22aaf822137c04cf95a76").into(), + hex!("2206cd50750355ffaef4a67634c21168f2b564c58ffd04f33b0dc7af7dab3291").into(), + 
hex!("1a4014f6c4fcce9949fba74cb0f9e88df086706f9e05560cc9f0926f8c90e373").into(), + hex!("2df7cc0bcf3060be4132c63da7599c2600d9bbadf37ab001f15629bc2255698e").into(), + hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(), + ], + finalized_block_root: hex!("f869dd1c9598043008a3ac2a5d91b3d6c7b0bb3295b3843bc84c083d70b0e604").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("5d7859883dde1eba6c98b20eac18426134b25da2a89e5e360f3343b15e0e0a31").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("f8fbebed4c84d46231bd293bb9fbc9340d5c28c284d99fdaddb77238b8960ae2").into(), + receipts_root: hex!("f9d844c5b79638609ba385b910fec3b5d891c9d7b189f135f0432f33473de915").into(), + logs_bloom: hex!("00800000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020200000000000000000000000000003000000040080008000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200800000000000").into(), + prev_randao: hex!("15533eeb366c6386bea5aeb8f425871928348c092209e4377f2418a6dedd7fd0").into(), + block_number: 2321, + gas_limit: 30000000, + gas_used: 113846, + timestamp: 1710554741, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("585a07122a30339b03b6481eae67c2d3de2b6b64f9f426230986519bf0f1bdfe").into(), + transactions_root: hex!("09cd60ee2207d804397c81f7b7e1e5d3307712b136e5376623a80317a4bdcd7a").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("9d419471a9a4719b40e7607781fbe32d9a7766b79805505c78c0c58133496ba2").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("bee375b8f1bbe4cd0e783c78026c1829ae72741c2dead5cab05d6834c5e5df65").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 4032, + proposer_index: 5, + parent_root: hex!("180aaaec59d38c3860e8af203f01f41c9bc41665f4d17916567c80f6cd23e8a2").into(), + state_root: hex!("3341790429ed3bf894cafa3004351d0b99e08baf6c38eb2a54d58e69fd2d19c6").into(), + body_root: hex!("a221e0c695ac7b7d04ce39b28b954d8a682ecd57961d81b44783527c6295f455").into(), + }, + block_roots_root: hex!("5744385ef06f82e67606f49aa29cd162f2e837a68fb7bd82f1fc6155d9f8640f").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs index 86ba3f7ecc18f3743fd22592fc23a50f26b447d7..6a951b568ae5d39cd3c6deca801b5feb1d05322f 100755 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_token_to_penpal.rs @@ -3,39 +3,93 @@ // Generated, do not edit! 
// See ethereum client README.md for instructions to generate -use crate::InboundQueueFixture; use hex_literal::hex; -use snowbridge_beacon_primitives::CompactExecutionHeader; -use snowbridge_core::inbound::{Log, Message, Proof}; +use snowbridge_beacon_primitives::{ + types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader, +}; +use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof}; +use sp_core::U256; use sp_std::vec; pub fn make_send_token_to_penpal_message() -> InboundQueueFixture { InboundQueueFixture { - execution_header: CompactExecutionHeader{ - parent_hash: hex!("434148c290f27ee4be34fa344cd7608bf942a4541b27c9d868439631b3f37a8d").into(), - block_number: 816, - state_root: hex!("595e643f9095870e30e85e2bbef7d9e3a39df5aae839d26cf455d3dbf3e5a539").into(), - receipts_root: hex!("c40ab2c4abcfdea4f42195e0ad822806e5423108021c3b542646c7193319a6c1").into(), - }, message: Message { event_log: Log { address: hex!("eda338e4dc46038493b885327842fd3e301cab39").into(), topics: vec![ hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(), hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(), - hex!("c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26f").into(), + hex!("be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aa").into(), ], - data: hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").into(), + data: hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").into(), }, proof: Proof { - block_hash: hex!("6c49a7f8fb2014a23e58a949c95a6743174589a7ce83434b073dc05dec402f3d").into(), - tx_index: 0, - data: (vec![ - hex!("c40ab2c4abcfdea4f42195e0ad822806e5423108021c3b542646c7193319a6c1").to_vec(), + receipt_proof: (vec![ + hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").to_vec(), ], vec![ - 
hex!("f90471822080b9046b02f90467018301d30fb9010000800000000000000000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000000008000000200000000000000001000008000000000000000000000000000000008000080000000000200000000000000000000000000100000000000000000011000000000000020000000000000000000000000000003000000000080018000000000000000000040044000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000f9035cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000000000000000000000000000000000000000007d0b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000201cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cf9015c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0c8eaf22f2cb07bac4679df0a660e7115ed87fcfd4e32ac269f6540265bbbd26fb8e000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").to_vec(), + 
hex!("f90471822080b9046b02f904670183017d9cb9010000800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000f9035cf89b9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a0ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000057a2d4ff0c3866d96556884bf09fecdd7ccd530ca00000000000000000000000000000000000000000000000000de0b6b3a7640000f9015d94eda338e4dc46038493b885327842fd3e301cab39f884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a000000000000000000000000087d1f7fdfee7f651fabc8bfcb6e086c278b77a7da000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000007d0b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000201cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07cf9015c94eda338e4dc46038493b885327842fd3e301cab39f863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a0be323bced46a1a49c8da2ab62ad5e974fd50f1dabaeed70b23ca5bcf14bfe4aab8e000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000007300a736aa00000000000187d1f7fdfee7f651fabc8bfcb6e086c278b77a7d01d00700001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c00286bee000000000000000000000000000064a7b3b6e00d000000000000000000e40b5402000000000000000000000000000000000000000000000000").to_vec(), ]), + execution_proof: ExecutionProof { + header: BeaconHeader { + slot: 4235, + proposer_index: 4, + parent_root: hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), + state_root: hex!("725f51771a0ecf72c647a283ab814ca088f998eb8c203181496b0b8e01f624fa").into(), + body_root: hex!("6f1c326d192e7e97e21e27b16fd7f000b8fa09b435ff028849927e382302b0ce").into(), + }, + ancestry_proof: Some(AncestryProof { + header_branch: vec![ + hex!("1b31e6264c19bcad120e434e0aede892e7d7c8ed80ab505cb593d9a4a16bc566").into(), + hex!("335eb186c077fa7053ec96dcc5d34502c997713d2d5bc4eb74842118d8cd5a64").into(), + hex!("326607faf2a7dfc9cfc4b6895f8f3d92a659552deb2c8fd1e892ec00c86c734c").into(), + hex!("4e20002125d7b6504df7c774f3f48e018e1e6762d03489149670a8335bba1425").into(), + hex!("e76af5cd61aade5aec8282b6f1df9046efa756b0466bba5e49032410f7739a1b").into(), + hex!("ee4dcd9527712116380cddafd120484a3bedf867225bbb86850b84decf6da730").into(), + hex!("e4687a07421d3150439a2cd2f09f3b468145d75b359a2e5fa88dfbec51725b15").into(), + hex!("38eaa78978e95759aa9b6f8504a8dbe36151f20ae41907e6a1ea165700ceefcd").into(), + hex!("1c1b071ec6f13e15c47d07d1bfbcc9135d6a6c819e68e7e6078a2007418c1a23").into(), + 
hex!("0b3ad7ad193c691c8c4ba1606ad2a90482cd1d033c7db58cfe739d0e20431e9e").into(), + hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(), + hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(), + hex!("b2ffec5f2c14640305dd941330f09216c53b99d198e93735a400a6d3a4de191f").into(), + ], + finalized_block_root: hex!("08be7a59e947f08cd95c4ef470758730bf9e3b0db0824cb663ea541c39b0e65c").into(), + }), + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: hex!("5d1186ae041f58785edb2f01248e95832f2e5e5d6c4eb8f7ff2f58980bfc2de9").into(), + fee_recipient: hex!("0000000000000000000000000000000000000000").into(), + state_root: hex!("2a66114d20e93082c8e9b47c8d401a937013487d757c9c2f3123cf43dc1f656d").into(), + receipts_root: hex!("106f1eaeac04e469da0020ad5c8a72af66323638bd3f561a3c8236063202c120").into(), + logs_bloom: hex!("00800000000000008000000000000000000000000000004000000000000000000400000000004000000000001000000010000000000000000000001008000000000000000000000001000008000040000000000000000000000000008000080000000000200000000000000000000000000100000000000000000010000000000000020000000000000000000000000000003000000000080018000000000000000000040004000021000000002000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000200820000000000").into(), + prev_randao: hex!("92e063c7e369b74149fdd1d7132ed2f635a19b9d8bff57637b8ee4736576426e").into(), + block_number: 4235, + gas_limit: 30000000, + gas_used: 97692, + timestamp: 1710556655, + extra_data: hex!("d983010d0b846765746888676f312e32312e368664617277696e").into(), + base_fee_per_gas: U256::from(7u64), + block_hash: hex!("ce24fe3047aa20a8f222cd1d04567c12b39455400d681141962c2130e690953f").into(), + transactions_root: hex!("0c8388731de94771777c60d452077065354d90d6e5088db61fc6a134684195cc").into(), + withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![ + hex!("99d397fa180078e66cd3a3b77bcb07553052f4e21d447167f3a406f663b14e6a").into(), + hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(), + hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(), + hex!("53ddf17147819c1abb918178b0230d965d1bc2c0d389f45e91e54cb1d2d468aa").into(), + ], + } }, }, + finalized_header: BeaconHeader { + slot: 4672, + proposer_index: 4, + parent_root: hex!("951233bf9f4bddfb2fa8f54e3bd0c7883779ef850e13e076baae3130dd7732db").into(), + state_root: hex!("4d303003b8cb097cbcc14b0f551ee70dac42de2c1cc2f4acfca7058ca9713291").into(), + body_root: hex!("664d13952b6f369bf4cf3af74d067ec33616eb57ed3a8a403fd5bae4fbf737dd").into(), + }, + block_roots_root: hex!("af71048297c070e6539cf3b9b90ae07d86d363454606bc239734629e6b49b983").into(), } } diff --git a/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs b/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs index 931befa2ac67810eaf7aac1b8156a46fc398f7a2..d59d92757721bac925cdfe67542138771c2a9dfa 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/benchmarking/mod.rs @@ -19,8 +19,8 @@ mod benchmarks { let create_message = make_register_token_message(); T::Helper::initialize_storage( - create_message.message.proof.block_hash, - create_message.execution_header, + create_message.finalized_header, + 
create_message.block_roots_root, ); let sovereign_account = sibling_sovereign_account::<T>(1000u32.into()); diff --git a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs index bdc21fcf037025f933b7c11e92937744e83e1da7..8acbb0c2916e704930268835e12bd14972737114 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs @@ -28,9 +28,6 @@ mod envelope; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -#[cfg(feature = "runtime-benchmarks")] -use snowbridge_beacon_primitives::CompactExecutionHeader; - pub mod weights; #[cfg(test)] @@ -44,7 +41,7 @@ use envelope::Envelope; use frame_support::{ traits::{ fungible::{Inspect, Mutate}, - tokens::Preservation, + tokens::{Fortitude, Preservation}, }, weights::WeightToFee, PalletError, @@ -52,6 +49,7 @@ use frame_system::ensure_signed; use scale_info::TypeInfo; use sp_core::{H160, H256}; +use sp_runtime::traits::Zero; use sp_std::{convert::TryFrom, vec}; use xcm::prelude::{ send_xcm, Instruction::SetTopic, Junction::*, Location, SendError as XcmpSendError, SendXcm, @@ -72,6 +70,9 @@ use sp_runtime::{traits::Saturating, SaturatedConversion, TokenError}; pub use weights::WeightInfo; +#[cfg(feature = "runtime-benchmarks")] +use snowbridge_beacon_primitives::BeaconHeader; + type BalanceOf<T> = <<T as Config>::Token as Inspect<<T as frame_system::Config>::AccountId>>::Balance; @@ -91,7 +92,7 @@ pub mod pallet { #[cfg(feature = "runtime-benchmarks")] pub trait BenchmarkHelper<T> { - fn initialize_storage(block_hash: H256, header: CompactExecutionHeader); + fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256); } #[pallet::config] @@ -261,11 +262,19 @@ pub mod pallet { } })?; - // Reward relayer from the sovereign account of the destination parachain - // Expected to fail if sovereign account has no funds + // Reward relayer from the sovereign account of the destination parachain, only if funds + // are available let sovereign_account = sibling_sovereign_account::<T>(channel.para_id); let delivery_cost = Self::calculate_delivery_cost(message.encode().len() as u32); - T::Token::transfer(&sovereign_account, &who, delivery_cost, Preservation::Preserve)?; + let amount = T::Token::reducible_balance( + &sovereign_account, + Preservation::Preserve, + Fortitude::Polite, + ) + .min(delivery_cost); + if !amount.is_zero() { + T::Token::transfer(&sovereign_account, &who, amount, Preservation::Preserve)?; + } // Decode message into XCM let (xcm, fee) = diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 39e9532ed321c1c6f83fe5e00886b3d7116f8aec..c96c868bc26ef71984e70bc40863db73cf18b9c5 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -4,11 +4,13 @@ use super::*; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU128, ConstU32, Everything}, + traits::{ConstU32, Everything}, weights::IdentityFee, }; use hex_literal::hex; -use snowbridge_beacon_primitives::{Fork, ForkVersions}; +use snowbridge_beacon_primitives::{ + types::deneb, BeaconHeader, ExecutionProof, Fork, ForkVersions, VersionedExecutionPayloadHeader, +}; use snowbridge_core::{ gwei, inbound::{Log, Proof, VerificationError}, @@ -20,7 +22,7 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, BuildStorage, FixedU128, MultiSignature, }; -use sp_std::convert::From; +use sp_std::{convert::From, 
default::Default}; use xcm::{latest::SendXcm, prelude::*}; use xcm_executor::AssetsInHolding; @@ -65,6 +67,10 @@ impl frame_system::Config for Test { type Block = Block; } +parameter_types! { + pub const ExistentialDeposit: u128 = 1; +} + impl pallet_balances::Config for Test { type MaxLocks = (); type MaxReserves = (); @@ -72,7 +78,7 @@ type Balance = Balance; type RuntimeEvent = RuntimeEvent; type DustRemoval = (); - type ExistentialDeposit = ConstU128<1>; + type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); type FreezeIdentifier = (); @@ -82,7 +88,6 @@ } parameter_types! { - pub const ExecutionHeadersPruneThreshold: u32 = 10; pub const ChainForkVersions: ForkVersions = ForkVersions{ genesis: Fork { version: [0, 0, 0, 1], // 0x00000001 @@ -110,7 +115,6 @@ parameter_types! { impl snowbridge_pallet_ethereum_client::Config for Test { type RuntimeEvent = RuntimeEvent; type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; type WeightInfo = (); } @@ -139,7 +143,7 @@ parameter_types! { #[cfg(feature = "runtime-benchmarks")] impl BenchmarkHelper<Test> for Test { // not implemented since the MockVerifier is used for tests - fn initialize_storage(_: H256, _: CompactExecutionHeader) {} + fn initialize_storage(_: BeaconHeader, _: H256) {} } // Mock XCM sender that always succeeds @@ -335,5 +339,32 @@ } } +pub fn mock_execution_proof() -> ExecutionProof { + ExecutionProof { + header: BeaconHeader::default(), + ancestry_proof: None, + execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader { + parent_hash: Default::default(), + fee_recipient: Default::default(), + state_root: Default::default(), + receipts_root: Default::default(), + logs_bloom: vec![], + prev_randao: Default::default(), + block_number: 0, + gas_limit: 0, + gas_used: 0, + timestamp: 0, + extra_data: vec![], + base_fee_per_gas: Default::default(), + block_hash: Default::default(), + transactions_root: Default::default(), + withdrawals_root: Default::default(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), + execution_branch: vec![], + } +} + pub const ASSET_HUB_PARAID: u32 = 1000u32; pub const TEMPLATE_PARAID: u32 = 1001u32; diff --git a/bridges/snowbridge/pallets/inbound-queue/src/test.rs b/bridges/snowbridge/pallets/inbound-queue/src/test.rs index 9a47e475b8c997a6fe4cc4d1860af3da0bf52a57..bd993c968df7373910b438a2e090ef2896bb41dd 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/test.rs @@ -6,7 +6,7 @@ use frame_support::{assert_noop, assert_ok}; use hex_literal::hex; use snowbridge_core::{inbound::Proof, ChannelId}; use sp_keyring::AccountKeyring as Keyring; -use sp_runtime::{DispatchError, TokenError}; +use sp_runtime::DispatchError; use sp_std::convert::From; use crate::{Error, Event as InboundQueueEvent}; @@ -25,9 +25,8 @@ let message = Message { event_log: mock_event_log(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; @@ -77,9 +76,8 @@ let message = Message { event_log: mock_event_log_invalid_channel(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: 
Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; assert_noop!( @@ -103,9 +101,8 @@ let message = Message { event_log: mock_event_log_invalid_gateway(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; assert_noop!( @@ -129,9 +126,8 @@ let message = Message { event_log: mock_event_log(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); @@ -150,12 +146,12 @@ } #[test] -fn test_submit_no_funds_to_reward_relayers() { +fn test_submit_no_funds_to_reward_relayers_just_ignore() { new_tester().execute_with(|| { let relayer: AccountId = Keyring::Bob.into(); let origin = RuntimeOrigin::signed(relayer); - // Reset balance of sovereign_account to zero so to trigger the FundsUnavailable error + // Reset balance of sovereign_account to zero first let sovereign_account = sibling_sovereign_account::<Test>(ASSET_HUB_PARAID.into()); Balances::set_balance(&sovereign_account, 0); @@ -163,15 +159,12 @@ let message = Message { event_log: mock_event_log(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; - assert_noop!( - InboundQueue::submit(origin.clone(), message.clone()), - TokenError::FundsUnavailable - ); + // Check that submit succeeds even when no funds are available + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); }); } @@ -183,9 +176,8 @@ let message = Message { event_log: mock_event_log(), proof: Proof { - block_hash: Default::default(), - tx_index: Default::default(), - data: Default::default(), + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), }, }; @@ -210,3 +202,44 @@ ); }); } + +#[test] +fn test_submit_no_funds_to_reward_relayers_and_ed_preserved() { + new_tester().execute_with(|| { + let relayer: AccountId = Keyring::Bob.into(); + let origin = RuntimeOrigin::signed(relayer); + + // Reset balance of sovereign account to (ED+1) first + let sovereign_account = sibling_sovereign_account::<Test>(ASSET_HUB_PARAID.into()); + Balances::set_balance(&sovereign_account, ExistentialDeposit::get() + 1); + + // Submit message successfully + let message = Message { + event_log: mock_event_log(), + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + + // Check that the sovereign account balance dropped to ED + let amount = Balances::balance(&sovereign_account); + assert_eq!(amount, ExistentialDeposit::get()); + + // Submit another message with nonce set to 2 + let mut event_log = mock_event_log(); + event_log.data[31] = 2; + let message = Message { + event_log, + proof: Proof { + receipt_proof: Default::default(), + execution_proof: mock_execution_proof(), + }, + }; + assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); + // Check that the sovereign account balance stays at ED
+ let amount = Balances::balance(&sovereign_account); + assert_eq!(amount, ExistentialDeposit::get()); + }); +} diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 0606e9de33056c9dffae50befcc1da5e865dca44..5315d6b4adbc1618720c7e0c781c65353c0ca4d6 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -23,7 +23,7 @@ sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-fea [dev-dependencies] hex-literal = { version = "0.4.1" } -env_logger = "0.9" +env_logger = "0.11" hex = "0.4" array-bytes = "4.1" sp-crypto-hashing = { path = "../../../../../substrate/primitives/crypto/hashing" } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index 67877a05c79a19ff64a121705f5b8f5a6356170c..5eeeeead140018e81a8aaaf302d678e3a4321e08 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -69,6 +69,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); type QueuePausedQuery = (); } diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index 0312456c98233a1e8538a741a2b19e0611aa670b..687072a49e2e5ca9e27d7f1be94ea89564c2275f 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -148,6 +148,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = HeapSize; type MaxStale = MaxStale; type ServiceWeight = ServiceWeight; + type IdleMaxServiceWeight = (); type QueuePausedQuery = (); } diff --git a/bridges/snowbridge/primitives/beacon/src/lib.rs b/bridges/snowbridge/primitives/beacon/src/lib.rs index 4c569d0176c21882aa3875024551848b1cc97494..6579d0f60966e1aff54583457e5fe31f7fded8b6 100644 --- a/bridges/snowbridge/primitives/beacon/src/lib.rs +++ b/bridges/snowbridge/primitives/beacon/src/lib.rs @@ -15,12 +15,12 @@ pub mod updates; mod serde_utils; pub use types::{ - BeaconHeader, CompactBeaconState, CompactExecutionHeader, ExecutionHeaderState, - ExecutionPayloadHeader, FinalizedHeaderState, Fork, ForkData, ForkVersion, ForkVersions, Mode, - PublicKey, Signature, SigningData, SyncAggregate, SyncCommittee, SyncCommitteePrepared, + AncestryProof, BeaconHeader, CompactBeaconState, ExecutionPayloadHeader, ExecutionProof, + FinalizedHeaderState, Fork, ForkData, ForkVersion, ForkVersions, Mode, PublicKey, Signature, + SigningData, SyncAggregate, SyncCommittee, SyncCommitteePrepared, VersionedExecutionPayloadHeader, }; -pub use updates::{CheckpointUpdate, ExecutionHeaderUpdate, NextSyncCommitteeUpdate, Update}; +pub use updates::{CheckpointUpdate, NextSyncCommitteeUpdate, Update}; pub use bits::decompress_sync_committee_bits; pub use bls::{ diff --git a/bridges/snowbridge/primitives/beacon/src/types.rs b/bridges/snowbridge/primitives/beacon/src/types.rs index 2af522f56b0dc4b00a155252e927a90e9bca9ed1..e12350510c9b83c85e27ecfa1534b984ff23fb05 100644 --- a/bridges/snowbridge/primitives/beacon/src/types.rs +++ b/bridges/snowbridge/primitives/beacon/src/types.rs @@ -110,14 +110,6 @@ impl<'de> Deserialize<'de> for Signature { } } -#[derive(Copy, Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)] -pub struct 
ExecutionHeaderState { - pub beacon_block_root: H256, - pub beacon_slot: u64, - pub block_hash: H256, - pub block_number: u64, -} - #[derive(Copy, Clone, Default, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct FinalizedHeaderState { pub beacon_block_root: H256, @@ -346,35 +338,6 @@ impl ExecutionPayloadHeader { } } -#[derive( - Default, - Encode, - Decode, - CloneNoBound, - PartialEqNoBound, - RuntimeDebugNoBound, - TypeInfo, - MaxEncodedLen, -)] -pub struct CompactExecutionHeader { - pub parent_hash: H256, - #[codec(compact)] - pub block_number: u64, - pub state_root: H256, - pub receipts_root: H256, -} - -impl From for CompactExecutionHeader { - fn from(execution_payload: ExecutionPayloadHeader) -> Self { - Self { - parent_hash: execution_payload.parent_hash, - block_number: execution_payload.block_number, - state_root: execution_payload.state_root, - receipts_root: execution_payload.receipts_root, - } - } -} - #[derive( Default, Encode, @@ -405,18 +368,6 @@ pub enum VersionedExecutionPayloadHeader { Deneb(deneb::ExecutionPayloadHeader), } -/// Convert VersionedExecutionPayloadHeader to CompactExecutionHeader -impl From for CompactExecutionHeader { - fn from(versioned_execution_header: VersionedExecutionPayloadHeader) -> Self { - match versioned_execution_header { - VersionedExecutionPayloadHeader::Capella(execution_payload_header) => - execution_payload_header.into(), - VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => - execution_payload_header.into(), - } - } -} - impl VersionedExecutionPayloadHeader { pub fn hash_tree_root(&self) -> Result { match self { @@ -448,6 +399,45 @@ impl VersionedExecutionPayloadHeader { execution_payload_header.block_number, } } + + pub fn receipts_root(&self) -> H256 { + match self { + VersionedExecutionPayloadHeader::Capella(execution_payload_header) => + execution_payload_header.receipts_root, + VersionedExecutionPayloadHeader::Deneb(execution_payload_header) => + execution_payload_header.receipts_root, + } + } +} + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] +#[cfg_attr( + feature = "std", + derive(serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct ExecutionProof { + /// Header for the beacon block containing the execution payload + pub header: BeaconHeader, + /// Proof that `header` is an ancestor of a finalized header + pub ancestry_proof: Option, + /// The execution header to be verified + pub execution_header: VersionedExecutionPayloadHeader, + /// Merkle proof that execution payload is contained within `header` + pub execution_branch: Vec, +} + +#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] +#[cfg_attr( + feature = "std", + derive(serde::Deserialize), + serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) +)] +pub struct AncestryProof { + /// Merkle proof that `header` is an ancestor of `finalized_header` + pub header_branch: Vec, + /// Root of a finalized block that has already been imported into the light client + pub finalized_block_root: H256, } #[cfg(test)] @@ -576,7 +566,6 @@ pub enum Mode { } pub mod deneb { - use crate::CompactExecutionHeader; use codec::{Decode, Encode}; use frame_support::{CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use scale_info::TypeInfo; @@ -627,15 +616,4 @@ pub mod deneb { pub blob_gas_used: u64, // [New in Deneb:EIP4844] pub excess_blob_gas: u64, // [New in Deneb:EIP4844] } - - impl From for 
CompactExecutionHeader { - fn from(execution_payload: ExecutionPayloadHeader) -> Self { - Self { - parent_hash: execution_payload.parent_hash, - block_number: execution_payload.block_number, - state_root: execution_payload.state_root, - receipts_root: execution_payload.receipts_root, - } - } - } } diff --git a/bridges/snowbridge/primitives/beacon/src/updates.rs b/bridges/snowbridge/primitives/beacon/src/updates.rs index 1ecd32c6d7b79f34854d9387fba78b99cce333b2..ca651b5806f270f85d830c218c53cb6a509cc6f4 100644 --- a/bridges/snowbridge/primitives/beacon/src/updates.rs +++ b/bridges/snowbridge/primitives/beacon/src/updates.rs @@ -6,7 +6,7 @@ use scale_info::TypeInfo; use sp_core::H256; use sp_std::prelude::*; -use crate::types::{BeaconHeader, SyncAggregate, SyncCommittee, VersionedExecutionPayloadHeader}; +use crate::types::{BeaconHeader, SyncAggregate, SyncCommittee}; #[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] #[cfg_attr( @@ -23,26 +23,13 @@ pub struct CheckpointUpdate { pub block_roots_branch: Vec<H256>, } -impl Default for CheckpointUpdate { - fn default() -> Self { - CheckpointUpdate { - header: Default::default(), - current_sync_committee: Default::default(), - current_sync_committee_branch: Default::default(), - validators_root: Default::default(), - block_roots_root: Default::default(), - block_roots_branch: Default::default(), - } - } -} - #[derive( Default, Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, )] #[cfg_attr( feature = "std", derive(serde::Deserialize), - serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) + serde(bound(serialize = ""), bound(deserialize = "")) )] pub struct Update { /// A recent header attesting to the finalized header, using its `state_root`. 
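
The `ExecutionProof` and `AncestryProof` types introduced above replace the removed `ExecutionHeaderUpdate` flow (whose deletion follows in the next hunk): instead of importing every execution header into on-chain storage, each inbound message now carries a proof that is checked on the fly. A minimal sketch of the two-step check these fields enable; all helper names here are hypothetical stand-ins, not the pallet's actual API:

    // Hypothetical sketch of the verification flow enabled by ExecutionProof.
    // `verify_merkle_branch` stands in for the real SSZ merkle-branch check.
    type H256 = [u8; 32];

    struct AncestryProof {
        header_branch: Vec<H256>,
        finalized_block_root: H256,
    }

    struct ExecutionProof {
        header_root: H256,                     // hash_tree_root of the beacon header
        ancestry_proof: Option<AncestryProof>,
        execution_root: H256,                  // hash_tree_root of the execution header
        execution_branch: Vec<H256>,
    }

    fn verify_merkle_branch(_leaf: H256, _branch: &[H256], _root: H256) -> bool {
        // Stand-in: hash the leaf up the branch and compare against the root.
        true
    }

    fn verify(proof: &ExecutionProof, imported_finalized_roots: &[H256]) -> bool {
        // Step 1: the beacon header must be finalized, either directly or via
        // the ancestry proof against an already-imported finalized block root.
        let header_finalized = match &proof.ancestry_proof {
            Some(ap) =>
                imported_finalized_roots.contains(&ap.finalized_block_root) &&
                    verify_merkle_branch(proof.header_root, &ap.header_branch, ap.finalized_block_root),
            None => imported_finalized_roots.contains(&proof.header_root),
        };
        // Step 2: the execution header must be contained within that beacon header.
        header_finalized &&
            verify_merkle_branch(proof.execution_root, &proof.execution_branch, proof.header_root)
    }
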
@@ -78,33 +65,3 @@ pub struct NextSyncCommitteeUpdate { pub next_sync_committee: SyncCommittee, pub next_sync_committee_branch: Vec<H256>, } - -#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] -#[cfg_attr( - feature = "std", - derive(serde::Deserialize), - serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) -)] -pub struct ExecutionHeaderUpdate { - /// Header for the beacon block containing the execution payload - pub header: BeaconHeader, - /// Proof that `header` is an ancestor of a finalized header - pub ancestry_proof: Option<AncestryProof>, - /// Execution header to be imported - pub execution_header: VersionedExecutionPayloadHeader, - /// Merkle proof that execution payload is contained within `header` - pub execution_branch: Vec<H256>, -} - -#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo)] -#[cfg_attr( - feature = "std", - derive(serde::Deserialize), - serde(deny_unknown_fields, bound(serialize = ""), bound(deserialize = "")) -)] -pub struct AncestryProof { - /// Merkle proof that `header` is an ancestor of `finalized_header` - pub header_branch: Vec<H256>, - /// Root of a finalized block that has already been imported into the light client - pub finalized_block_root: H256, -} diff --git a/bridges/snowbridge/primitives/core/src/inbound.rs b/bridges/snowbridge/primitives/core/src/inbound.rs index 4b04470ad02615d86f1a1c530f3cbed809649328..9e8ed789ab500917eb520176fdf39343a65838da 100644 --- a/bridges/snowbridge/primitives/core/src/inbound.rs +++ b/bridges/snowbridge/primitives/core/src/inbound.rs @@ -5,6 +5,7 @@ use codec::{Decode, Encode}; use frame_support::PalletError; use scale_info::TypeInfo; +use snowbridge_beacon_primitives::{BeaconHeader, ExecutionProof}; use sp_core::{H160, H256}; use sp_runtime::RuntimeDebug; use sp_std::vec::Vec; @@ -25,6 +26,8 @@ pub enum VerificationError { InvalidLog, /// Unable to verify the transaction receipt with the provided proof InvalidProof, + /// Unable to verify the execution header with ancestry proof + InvalidExecutionProof(#[codec(skip)] &'static str), } pub type MessageNonce = u64; @@ -65,10 +68,15 @@ impl Log { /// Inclusion proof for a transaction receipt #[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] pub struct Proof { - // The block hash of the block in which the receipt was included. - pub block_hash: H256, - // The index of the transaction (and receipt) within the block. 
- pub tx_index: u32, // Proof keys and values (receipts tree) - pub data: (Vec<Vec<u8>>, Vec<Vec<u8>>), + pub receipt_proof: (Vec<Vec<u8>>, Vec<Vec<u8>>), + // Proof that an execution header was finalized by the beacon chain + pub execution_proof: ExecutionProof, +} + +#[derive(Clone, RuntimeDebug)] +pub struct InboundQueueFixture { + pub message: Message, + pub finalized_header: BeaconHeader, + pub block_roots_root: H256, } diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 4e8b311cb97812bb94140aa02405b3a174064a8f..90b4f38e721480d05c75a818bab86dec2be275f9 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -14,7 +14,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = "1.11.0" diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index 7455adf76170acefd50f06e8a40ef1c79028f49f..3e2de0e481b868de773ad3b2946d91e31e1e71d1 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -467,7 +467,6 @@ pub fn ethereum_extrinsic<Runtime>( let initial_checkpoint = make_checkpoint(); let update = make_finalized_header_update(); let sync_committee_update = make_sync_committee_update(); - let execution_header_update = make_execution_header_update(); let alice = Alice; let alice_account = alice.to_account_id(); @@ -494,22 +493,12 @@ pub fn ethereum_extrinsic<Runtime>( } .into(); - let execution_header_call: <Runtime as frame_system::Config>::RuntimeCall = - snowbridge_pallet_ethereum_client::Call::<Runtime>::submit_execution_header { - update: Box::new(*execution_header_update), - } - .into(); - let update_outcome = construct_and_apply_extrinsic(alice, update_call.into()); assert_ok!(update_outcome); let sync_committee_outcome = construct_and_apply_extrinsic(alice, update_sync_committee_call.into()); assert_ok!(sync_committee_outcome); - - let execution_header_outcome = - construct_and_apply_extrinsic(alice, execution_header_call.into()); - assert_ok!(execution_header_outcome); }); } @@ -548,7 +537,6 @@ pub fn ethereum_to_polkadot_message_extrinsics_work<Runtime>( .execute_with(|| { let initial_checkpoint = make_checkpoint(); let sync_committee_update = make_sync_committee_update(); - let execution_header_update = make_execution_header_update(); let alice = Alice; let alice_account = alice.to_account_id(); @@ -569,18 +557,8 @@ pub fn ethereum_to_polkadot_message_extrinsics_work<Runtime>( } .into(); - let execution_header_call: <Runtime as frame_system::Config>::RuntimeCall = - snowbridge_pallet_ethereum_client::Call::<Runtime>::submit_execution_header { - update: Box::new(*execution_header_update), - } - .into(); - let sync_committee_outcome = construct_and_apply_extrinsic(alice, update_sync_committee_call.into()); assert_ok!(sync_committee_outcome); - - let execution_header_outcome = - construct_and_apply_extrinsic(alice, execution_header_call.into()); - assert_ok!(execution_header_outcome); }); } diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh index 
32005b770ecf44cb9af18c61f830243ed5287e68..529057c3f26feb8b3287648ccfc4fb72a649b71d 100755 --- a/bridges/snowbridge/scripts/contribute-upstream.sh +++ b/bridges/snowbridge/scripts/contribute-upstream.sh @@ -79,4 +79,10 @@ git fetch parity master git checkout parity/master -- .github git add -- .github +git commit -m "cleanup branch" + +# Fetch the latest from parity master +echo "Fetching latest from Parity master. Resolve merge conflicts, if there are any." +git fetch parity master +git merge parity/master echo "OK" diff --git a/bridges/testing/environments/rococo-westend/rococo.zndsl b/bridges/testing/environments/rococo-westend/rococo.zndsl index 5b49c7c632fa4dd0ce77134858a2f697acbfff16..a75286445a240eb302ac8fd23ae7fe707e6c0af3 100644 --- a/bridges/testing/environments/rococo-westend/rococo.zndsl +++ b/bridges/testing/environments/rococo-westend/rococo.zndsl @@ -1,7 +1,7 @@ -Description: Check if the with-Westend GRANPDA pallet was initialized at Rococo BH +Description: Check if the with-Westend GRANDPA pallet was initialized at Rococo BH Network: ./bridge_hub_rococo_local_network.toml Creds: config -# relay is already started - let's wait until with-Westend GRANPDA pallet is initialized at Rococo +# relay is already started - let's wait until with-Westend GRANDPA pallet is initialized at Rococo bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds diff --git a/bridges/testing/environments/rococo-westend/westend.zndsl b/bridges/testing/environments/rococo-westend/westend.zndsl index 07968838852f7c0a00131db3080c460c07d08206..21d4ebf3b05b003083f7d35bf6bb7a1ca4c1ad05 100644 --- a/bridges/testing/environments/rococo-westend/westend.zndsl +++ b/bridges/testing/environments/rococo-westend/westend.zndsl @@ -1,6 +1,6 @@ -Description: Check if the with-Rococo GRANPDA pallet was initialized at Westend BH +Description: Check if the with-Rococo GRANDPA pallet was initialized at Westend BH Network: ./bridge_hub_westend_local_network.toml Creds: config -# relay is already started - let's wait until with-Rococo GRANPDA pallet is initialized at Westend +# relay is already started - let's wait until with-Rococo GRANDPA pallet is initialized at Westend bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds diff --git a/bridges/testing/framework/utils/generate_hex_encoded_call/index.js b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js index 30f89d754ceb7de1b24bc31413e09c862a461256..c8e361b25a9ce3e56f3c839d53e2fc67106a98c8 100644 --- a/bridges/testing/framework/utils/generate_hex_encoded_call/index.js +++ b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js @@ -126,36 +126,36 @@ if (!process.argv[2] || !process.argv[3]) { } const type = process.argv[2]; -const rpcEnpoint = process.argv[3]; +const rpcEndpoint = process.argv[3]; const output = process.argv[4]; const inputArgs = process.argv.slice(5, process.argv.length); console.log(`Generating hex-encoded call data for:`); console.log(` type: ${type}`); -console.log(` rpcEnpoint: ${rpcEnpoint}`); +console.log(` rpcEndpoint: ${rpcEndpoint}`); console.log(` output: ${output}`); console.log(` inputArgs: ${inputArgs}`); switch (type) { case 'remark-with-event': - remarkWithEvent(rpcEnpoint, output); + remarkWithEvent(rpcEndpoint, output); break; case 'add-exporter-config': - addExporterConfig(rpcEnpoint, output, inputArgs[0], inputArgs[1]); + 
addExporterConfig(rpcEndpoint, output, inputArgs[0], inputArgs[1]); break; case 'remove-exporter-config': - removeExporterConfig(rpcEnpoint, output, inputArgs[0], inputArgs[1]); + removeExporterConfig(rpcEndpoint, output, inputArgs[0], inputArgs[1]); break; case 'add-universal-alias': - addUniversalAlias(rpcEnpoint, output, inputArgs[0], inputArgs[1]); + addUniversalAlias(rpcEndpoint, output, inputArgs[0], inputArgs[1]); break; case 'add-reserve-location': - addReserveLocation(rpcEnpoint, output, inputArgs[0]); + addReserveLocation(rpcEndpoint, output, inputArgs[0]); break; case 'force-create-asset': - forceCreateAsset(rpcEnpoint, output, inputArgs[0], inputArgs[1], inputArgs[2], inputArgs[3]); + forceCreateAsset(rpcEndpoint, output, inputArgs[0], inputArgs[1], inputArgs[2], inputArgs[3]); break; case 'force-xcm-version': - forceXcmVersion(rpcEnpoint, output, inputArgs[0], inputArgs[1]); + forceXcmVersion(rpcEndpoint, output, inputArgs[0], inputArgs[1]); break; case 'check': console.log(`Checking nodejs installation, if you see this everything is ready!`); diff --git a/bridges/testing/run-tests.sh b/bridges/testing/run-tests.sh index 6149d9912653c79968a0229759c8f1bf46f68a9f..fd12b57f53349a0a449af7103c05341d3c94ceb9 100755 --- a/bridges/testing/run-tests.sh +++ b/bridges/testing/run-tests.sh @@ -30,7 +30,7 @@ done export POLKADOT_SDK_PATH=`realpath $(dirname "$0")/../..` export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_PATH/bridges/testing/tests -# set pathc to binaries +# set path to binaries if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then export POLKADOT_BINARY=/usr/local/bin/polkadot export POLKADOT_PARACHAIN_BINARY=/usr/local/bin/polkadot-parachain diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh index 7d5b8d9273664b0861e8ffe1c528e9e1718c4df4..3a604b3876d96241903c1c5a110cc6392f26cb7e 100755 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh +++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh @@ -24,7 +24,7 @@ echo -e "Sleeping 90s before starting relayer ...\n" sleep 90 ${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir relayer_pid -# Sometimes the relayer syncs multiple parachain heads in the begining leading to test failures. +# Sometimes the relayer syncs multiple parachain heads in the beginning leading to test failures. # See issue: https://github.com/paritytech/parity-bridges-common/issues/2838. # TODO: Remove this sleep after the issue is fixed. 
echo -e "Sleeping 180s before runing the tests ...\n" diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 0e911b9f3abfec1a894b50d63620f914268b78a9..42f7342d1a5381a6174c15909b815485eb7c8a7e 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] parking_lot = "0.12.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" tracing = "0.1.25" # Substrate @@ -34,7 +34,7 @@ cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } [dev-dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" # Substrate sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index e815e89d8ce3bebcdcf52b363917ba17c0382708..70dd67cb9a00b2e7b0baf04dbfdaaff0386104e5 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" tracing = "0.1.37" @@ -41,7 +41,7 @@ substrate-prometheus-endpoint = { path = "../../../../substrate/utils/prometheus cumulus-client-consensus-common = { path = "../common" } cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } cumulus-client-consensus-proposer = { path = "../proposer" } -cumulus-client-parachain-inherent = { path = "../../../client/parachain-inherent" } +cumulus-client-parachain-inherent = { path = "../../parachain-inherent" } cumulus-primitives-aura = { path = "../../../primitives/aura" } cumulus-primitives-core = { path = "../../../primitives/core" } cumulus-client-collator = { path = "../../collator" } diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 161f10d55a193de35a2585e1a1f5725f30e19bf7..580058336174df21a225d64e96a52913b24c2ae7 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -49,7 +49,7 @@ use polkadot_node_subsystem::messages::{ CollationGenerationMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; +use polkadot_primitives::{CollatorPair, CoreIndex, Id as ParaId, OccupiedCoreAssumption}; use futures::{channel::oneshot, prelude::*}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; @@ -184,7 +184,15 @@ where while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); - if !is_para_scheduled(relay_parent, params.para_id, &mut params.overseer_handle).await { + // TODO: Currently we use just the first core here, but for elastic scaling + // we iterate and build on all of the cores returned. 
+ let core_index = if let Some(core_index) = + cores_scheduled_for_para(relay_parent, params.para_id, &mut params.overseer_handle) + .await + .get(0) + { + *core_index + } else { tracing::trace!( target: crate::LOG_TARGET, ?relay_parent, @@ -193,7 +201,7 @@ where ); continue - } + }; let max_pov_size = match params .relay_client @@ -396,6 +404,7 @@ where parent_head: parent_header.encode().into(), validation_code_hash, result_sender: None, + core_index, }, ), "SubmitCollation", @@ -480,14 +489,12 @@ async fn max_ancestry_lookback( } } -// Checks if there exists a scheduled core for the para at the provided relay parent. -// -// Falls back to `false` in case of an error. -async fn is_para_scheduled( +// Return all the cores assigned to the para at the provided relay parent. +async fn cores_scheduled_for_para( relay_parent: PHash, para_id: ParaId, overseer_handle: &mut OverseerHandle, -) -> bool { +) -> Vec<CoreIndex> { let (tx, rx) = oneshot::channel(); let request = RuntimeApiRequest::AvailabilityCores(tx); overseer_handle @@ -503,7 +510,7 @@ async fn is_para_scheduled( ?relay_parent, "Failed to query availability cores runtime API", ); - return false + return Vec::new() }, Err(oneshot::Canceled) => { tracing::error!( @@ -511,9 +518,19 @@ async fn is_para_scheduled( ?relay_parent, "Sender for availability cores runtime request dropped", ); - return false + return Vec::new() }, }; - cores.iter().any(|core| core.para_id() == Some(para_id)) + cores + .iter() + .enumerate() + .filter_map(|(index, core)| { + if core.para_id() == Some(para_id) { + Some(CoreIndex(index as u32)) + } else { + None + } + }) + .collect() } diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 5a014b10e35f39b0a5e00ca01da7cfd3ecc50a5f..fb4a85ad122637470a81371eb12352a14ac7c61e 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } dyn-clone = "1.0.16" futures = "0.3.28" diff --git a/cumulus/client/consensus/common/src/level_monitor.rs b/cumulus/client/consensus/common/src/level_monitor.rs index 270e3f57ae5a374aba2f22966c7376e0791020ca..fb4b0498f6887f8bea1d3e7ba4e7f4082e2f9747 100644 --- a/cumulus/client/consensus/common/src/level_monitor.rs +++ b/cumulus/client/consensus/common/src/level_monitor.rs @@ -158,7 +158,7 @@ where /// the limit passed to the constructor. /// /// If the given level is found to have a number of blocks greater than or equal the limit - /// then the limit is enforced by chosing one (or more) blocks to remove. + /// then the limit is enforced by choosing one (or more) blocks to remove. /// /// The removal strategy is driven by the block freshness. 
/// diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index bfb95ae388ae3cd31f5035a9c6195631adbb8809..7816d3a4c40a7ab02aceb86c701f127e45158470 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -832,7 +832,7 @@ fn restore_limit_monitor() { .collect::<Vec<_>>(); // Scenario before limit application (with B11 imported as best) - // Import order (freshess): B00, B10, B11, B12, B20, B21 + // Import order (freshness): B00, B10, B11, B12, B20, B21 // // B00 --+-- B10 --+-- B20 // | +-- B21 diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index b37232bb4485d6f5ece63a3c940bd065c1d3f083..42ca4e06f8f45e410006304cf1290e811e153b4c 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] anyhow = "1.0" -async-trait = "0.1.74" +async-trait = "0.1.79" thiserror = { workspace = true } # Substrate diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index 3d06d6b89ef7447b6196b1fc395ed7be54e3ba62..cb32b98045760de5621792c6ef45f76faf2e7e08 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" parking_lot = "0.12.1" tracing = "0.1.37" diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 995ef606d270ed688af3751016be9c8c37f80305..1210975ef690c4d110a7f96b2e88e4d151edfa87 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index e00f3ba26066c037aca5e11fcb539ae6d95a4c85..6e9adab1ffc9e2bf9de9d8101eb38e4da8212b27 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -7,9 +7,9 @@ description = "Inherent that needs to be present in every parachain block. 
Conta license = "Apache-2.0" [dependencies] -async-trait = "0.1.73" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } -scale-info = { version = "2.10.0", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } tracing = { version = "0.1.37" } # Substrate diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 375a57a87c2aa3fb92a6317c756b18c8117736f8..571935620d6d90b3aa1157124a2bae6fd73a19c2 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -32,7 +32,7 @@ polkadot-primitives = { path = "../../../polkadot/primitives" } # Cumulus cumulus-primitives-core = { path = "../../primitives/core" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } -async-trait = "0.1.74" +async-trait = "0.1.79" [dev-dependencies] tokio = { version = "1.32.0", features = ["macros"] } diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 32aba6c8993a6da67cd3adeb394e87e6c067b46c..0ca21749c3eb557a78f0996c703e86b35cab6f17 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -18,7 +18,7 @@ //! //! A parachain needs to build PoVs that are send to the relay chain to progress. These PoVs are //! erasure encoded and one piece of it is stored by each relay chain validator. As the relay chain -//! decides on which PoV per parachain to include and thus, to progess the parachain it can happen +//! decides on which PoV per parachain to include and thus, to progress the parachain it can happen //! that the block corresponding to this PoV isn't propagated in the parachain network. This can //! have several reasons, either a malicious collator that managed to include its own PoV and //! doesn't want to share it with the rest of the network or maybe a collator went down before it @@ -338,8 +338,8 @@ where let mut blocks_to_delete = vec![hash]; while let Some(delete) = blocks_to_delete.pop() { - if let Some(childs) = self.waiting_for_parent.remove(&delete) { - blocks_to_delete.extend(childs.iter().map(BlockT::hash)); + if let Some(children) = self.waiting_for_parent.remove(&delete) { + blocks_to_delete.extend(children.iter().map(BlockT::hash)); } } self.clear_waiting_recovery(&hash); @@ -448,7 +448,7 @@ where /// Import the given `block`. /// - /// This will also recursivley drain `waiting_for_parent` and import them as well. + /// This will also recursively drain `waiting_for_parent` and import them as well. fn import_block(&mut self, block: Block) { let mut blocks = VecDeque::new(); @@ -495,7 +495,7 @@ where tracing::debug!( target: LOG_TARGET, block_hash = ?hash, - "Cound not recover. Block was never announced as candidate" + "Could not recover. 
Block was never announced as candidate" ); return }, diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index aa16230cd8aff0f313311c15269508156e57b544..7629b6c631a3a0195760eee7fd39e754c289dd3e 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" futures-timer = "3.0.2" diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 6e652b892104e929e5e0a5bb7ce8cf33d364e8e6..6df9847252fecf8e4bdcdec0ff9f830d6530668b 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -20,7 +20,7 @@ sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sc-client-api = { path = "../../../substrate/client/api" } futures = "0.3.28" -async-trait = "0.1.74" +async-trait = "0.1.79" thiserror = { workspace = true } jsonrpsee-core = "0.22" parity-scale-codec = "3.6.4" diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 98240c92adab38ea103f14699e1e49f7449b3d24..6860b42a5078d48e74bbbd7ee64fbab43e5d5e13 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -49,6 +49,6 @@ cumulus-primitives-core = { path = "../../primitives/core" } array-bytes = "6.1" tracing = "0.1.37" -async-trait = "0.1.74" +async-trait = "0.1.79" futures = "0.3.28" parking_lot = "0.12.1" diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 8d8a2920b4efaa70a9b59ba42ccd26a2e4cb64d5..aa5e67e453f69bd02920888a19d73b94e92addce 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -26,9 +26,7 @@ use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - CoreIndex, + slashing, ApprovalVotingParams, CoreIndex, NodeFeatures, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sc_client_api::AuxStore; diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 801712b1ad150a8a63e85c40ec0854a62b5969bc..14981677289561040875b0951dbdbffb5854a439 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -35,7 +35,7 @@ futures-timer = "3.0.2" parity-scale-codec = "3.6.4" jsonrpsee = { version = "0.22", features = ["ws-client"] } tracing = "0.1.37" -async-trait = "0.1.74" +async-trait = "0.1.79" url = "2.4.0" serde_json = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } diff --git a/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs b/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs index b716feef1c998d66eba3c5ea28ee1dd98c4959e3..48d35dd3a55eee935fb27f2fbc60302e9ed3e76a 100644 --- 
a/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/reconnecting_ws_client.rs @@ -293,7 +293,8 @@ impl ReconnectingWebsocketWorker { /// listeners. If an error occurs during sending, the receiver has been closed and we remove /// the sender from the list. /// - Find a new valid RPC server to connect to in case the websocket connection is terminated. - /// If the worker is not able to connec to an RPC server from the list, the worker shuts down. + /// If the worker is not able to connect to an RPC server from the list, the worker shuts + /// down. pub async fn run(mut self) { let mut pending_requests = FuturesUnordered::new(); diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 8cf5ccf0c707e5f8e3529462041f75a17ea6c117..547803865c28a28a7534b3a5d7e0075a2332c71b 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -32,13 +32,12 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, - OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + slashing, ApprovalVotingParams, BlockNumber, CandidateCommitments, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, + InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 2bafbee951a950ce7144e593cab9ae4387b1a5f6..e03e20fe5b416102aa99739cb00c0b39edf5b999 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -30,6 +30,7 @@ sp-consensus = { path = "../../../substrate/primitives/consensus/common" } sp-core = { path = "../../../substrate/primitives/core" } sp-runtime = { path = "../../../substrate/primitives/runtime" } sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool" } +sp-io = { path = "../../../substrate/primitives/io" } # Polkadot polkadot-primitives = { path = "../../../polkadot/primitives" } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 950e59aff24ec19a2e0ed3b22215d6c77fb7dc5e..91e884d6f7ecec86906b5b06ead50a054aaf2577 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -54,6 +54,15 @@ use std::{sync::Arc, time::Duration}; pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size; +/// Host functions that should be used in parachain nodes. +/// +/// Contains the standard substrate host functions, as well as a +/// host function to enable PoV-reclaim on parachain nodes. 
+pub type ParachainHostFunctions = ( + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + sp_io::SubstrateHostFunctions, +); + // Given the sporadic nature of the explicit recovery operation and the // possibility to retry infinite times this value is more than enough. // In practice here we expect no more than one queued messages. diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index ff30dce7b03394a523a811217de0db9b7dc5cd02..fe717596f9b307993f968ea0e7b58faef591dcf7 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 241a78466d61be3969930e031e1a52f4f3478482..c04d9e1403ec0e3d527e06c7b0c7c5e6f64150ad 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = { workspace = true } codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.0.0" } rand = { version = "0.8.5", features = ["std_rng"], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 83ed994d04167607e3df54587c49e6b88576d4ec..b2b24aeed72ba28ea320a90b8e725ce983fa1a07 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 7e0442f0b5856fa5153e29ff497cfee876c067ec..a905df5b94a644ffeb69ae73ecd4f2002c10ee92 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -16,7 +16,7 @@ environmental = { version = "1.1.4", default-features = false } impl-trait-for-tuples = "0.2.1" log = { workspace = true } trie-db = { version = "0.28.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { 
path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 1c01ef33c7e7534647712609cc48494803946c3b..54a1def59600dff5dd8b1484691dcb18fff5ce63 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -464,7 +464,7 @@ pub mod pallet { // One complication here, is that the `host_configuration` is updated by an inherent // and those are processed after the block initialization phase. Therefore, we have to // be content only with the configuration as per the previous block. That means that - // the configuration can be either stale (or be abscent altogether in case of the + // the configuration can be either stale (or be absent altogether in case of the // beginning of the chain). // // In order to mitigate this, we do the following. At the time, we are only concerned diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index 0b1d536ba7cd9153d1409a65a87bc8234a09bd95..fe89dfe68c67e4b5d9f9e046dcf2a780a292da0a 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -125,6 +125,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MaxWeight; + type IdleMaxServiceWeight = (); type WeightInfo = (); } diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 5519d1521ea6da85573f2ecdd9cca001616b5806..60eccfb072f41e87223d54f34b5c7eb59c00b2ad 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -90,7 +90,7 @@ pub enum Error { DmqMqcHead(ReadEntryErr), /// Relay dispatch queue cannot be extracted. RelayDispatchQueueRemainingCapacity(ReadEntryErr), - /// The hrmp inress channel index cannot be extracted. + /// The hrmp ingress channel index cannot be extracted. HrmpIngressChannelIndex(ReadEntryErr), /// The hrmp egress channel index cannot be extracted. 
HrmpEgressChannelIndex(ReadEntryErr), diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index f7dc5fe4de372b1b5a03ea00cc041e3e568332d3..417038d7833c5ca4ec4acf681cba73d24ab45c5e 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 63cb14b16e769fe8ddd309baf8d51594b47da49b..9122e110fb92e51b0f85e3ba4136d30178d8ab7a 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 9078d5eda997526b8f3ed7d9f118cc9c927dedc1..ab196c6d3ec6a089d8099d6ae893631ac513e9d7 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index e92169be16b0b8466582f3dd143d9e3e1807a42d..b4cd925d540ead4ef17af337f59192c4cfec0042 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -462,7 +462,7 @@ impl<T: Config> Pallet<T> { // Max message size refers to aggregates, or pages. Not to individual fragments. let max_message_size = channel_info.max_message_size as usize; let format_size = format.encoded_size(); - // We check the encoded fragment length plus the format size agains the max message size + // We check the encoded fragment length plus the format size against the max message size // because the format is concatenated if a new page is needed. 
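
The corrected comment above states the page-accounting rule: a fragment is measured together with the format marker, because the marker is concatenated again whenever a new page has to be opened, which is what the `size_to_check` computation that follows implements. A small sketch of that check, with all names as assumed local stand-ins rather than the pallet's API:

    // Sketch of the page-fit check described in the comment above; only the
    // arithmetic mirrors the pallet logic.
    fn fits_in_current_page(fragment_len: usize, format_size: usize, max_message_size: usize) -> bool {
        // Count the format marker alongside the fragment, since starting a new
        // page re-emits the marker.
        fragment_len.saturating_add(format_size) <= max_message_size
    }

    fn main() {
        assert!(fits_in_current_page(100, 1, 128));  // fits alongside the marker
        assert!(!fits_in_current_page(128, 1, 128)); // would overflow the page
    }
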
let size_to_check = encoded_fragment .len() diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index c7fa61a3e3f0513e53d67fcee4efd6c3cecf29f6..1702cd70bc2fb7e09cf15fbe21294bdb76b96ef1 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -20,7 +20,7 @@ use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAU use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ pallet_prelude::*, - traits::{EnqueueMessage, OnRuntimeUpgrade, StorageVersion}, + traits::{EnqueueMessage, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, }; @@ -96,7 +96,7 @@ pub mod v2 { /// 2D weights). pub struct UncheckedMigrationToV2<T: Config>(PhantomData<T>); - impl<T: Config> OnRuntimeUpgrade for UncheckedMigrationToV2<T> { + impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV2<T> { #[allow(deprecated)] fn on_runtime_upgrade() -> Weight { let translate = |pre: v1::QueueConfigData| -> v2::QueueConfigData { @@ -187,7 +187,7 @@ pub mod v3 { /// Migrates the pallet storage to v3. pub struct UncheckedMigrationToV3<T: Config>(PhantomData<T>); - impl<T: Config> OnRuntimeUpgrade for UncheckedMigrationToV3<T> { + impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV3<T> { fn on_runtime_upgrade() -> Weight { #[frame_support::storage_alias] type Overweight = @@ -266,7 +266,7 @@ pub mod v4 { /// thresholds to at least the default values. pub struct UncheckedMigrationToV4<T: Config>(PhantomData<T>); - impl<T: Config> OnRuntimeUpgrade for UncheckedMigrationToV4<T> { + impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV4<T> { fn on_runtime_upgrade() -> Weight { let translate = |pre: v2::QueueConfigData| -> QueueConfigData { let pre_default = v2::QueueConfigData::default(); @@ -315,6 +315,7 @@ pub mod v4 { mod tests { use super::*; use crate::mock::{new_test_ext, Test}; + use frame_support::traits::OnRuntimeUpgrade; #[test] #[allow(deprecated)] diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index c79fd582348b0223cd4c1de71b94075751cfdeb2..adb35b8a349f64b87d9aa96feb84e5068f261ad0 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -4,7 +4,8 @@ "chainType": "Live", "bootNodes": [ "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", - "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH" + "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + "/dns/boot.metaspan.io/tcp/33019/p2p/12D3KooWCa1uNnEZqiqJY9jkKNQxwSLGPeZ5MjWHhjQMGwga9JMM" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index ebc9f822beb2c0069276876161158243501ab288..fa16205d0fd1adee1491cf3abb92a55829b16a44 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate 
frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index f4f8b3603ba61e44195d6be8151b296f002ac720..98762beb0cb23132c3880515287328bb09bde032 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs index 2acccb9649b89aa8aea6349026fb72ea511cad78..e5378b35f5e484e10db94c66bc5244099b682604 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -20,7 +20,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, - PenpalSiblingSovereigAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, + PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, SAFE_XCM_VERSION, }; use parachains_common::{AccountId, Balance}; @@ -76,7 +76,7 @@ pub fn genesis() -> Storage { // Penpal's teleportable asset representation ( PenpalTeleportableAssetLocation::get(), - PenpalSiblingSovereigAccount::get(), + PenpalSiblingSovereignAccount::get(), true, ED, ), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index d4764f63bf64d3cc650d53ee8c637129448121a2..a42a9abf618d403852561d5d4b20e7fb6ad576e7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } asset-hub-westend-runtime = { path = 
"../../../../../../runtimes/assets/asset-hub-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs index e30529aff42ca02a02ab9c93dc502f7fd50d02e4..219d1306906cbc6c20609870bec496c5bd16eaeb 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -20,7 +20,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, - PenpalSiblingSovereigAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, + PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, SAFE_XCM_VERSION, }; use parachains_common::{AccountId, Balance}; @@ -72,7 +72,7 @@ pub fn genesis() -> Storage { // Penpal's teleportable asset representation ( PenpalTeleportableAssetLocation::get(), - PenpalSiblingSovereigAccount::get(), + PenpalSiblingSovereignAccount::get(), true, ED, ), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml index 322d8b44e6ea6b293fb1c2f52e0de80fa9cd29f1..789f10a35f268c62e0cc9fa153c99e7e5282ee8d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } emulated-integration-tests-common = { path = "../../../../common", default-features = false } bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } bridge-hub-common = { path = "../../../../../../runtimes/bridge-hubs/common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml index ec1386b7f6e2b231e7f9ffdc925b3d0c802bf4f2..d82971cf55aeddf20032be952b8a980014434f6b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } emulated-integration-tests-common = { path = "../../../../common", default-features = false } bridge-hub-westend-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-westend" } bridge-hub-common = { path = 
"../../../../../../runtimes/bridge-hubs/common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml index 03f755b666afbf87e876a7f7d5111aa98a1bc727..4c2a7d3c274dce6eade9c9d42be00301bd6dc462 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml @@ -17,7 +17,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } collectives-westend-runtime = { path = "../../../../../../runtimes/collectives/collectives-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml index 65a358d0ef2f6b697453f2c1cbef20d3b2cd93a8..f7fe93d27775a28cb560d8791a3b0d8ed49c9d68 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml @@ -14,7 +14,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } people-rococo-runtime = { path = "../../../../../../runtimes/people/people-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml index 075698848bcf0705c00ea46e3443802ca2fbe6b0..57a767e0c2a3eb7d23df7f8d95fd78128c996f35 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml @@ -14,7 +14,7 @@ sp-core = { path = "../../../../../../../../substrate/primitives/core", default- frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } people-westend-runtime = { path = "../../../../../../runtimes/people/people-westend" } diff --git 
a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index f47350b00eb18f46ad33f23ca490664c726dc878..2ac508273c6158ddae08615d8574102f98e3e788 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -20,7 +20,7 @@ frame-support = { path = "../../../../../../../../substrate/frame/support", defa xcm = { package = "staging-xcm", path = "../../../../../../../../polkadot/xcm", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../../parachains/common" } +parachains-common = { path = "../../../../../../common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs index 48901647fd052ec1499461ff7cb1dfa8cf797e82..d81ab8143ddba678617aaa67db122298ce29606c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -31,8 +31,8 @@ pub const PARA_ID_B: u32 = 2001; pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT; parameter_types! { - pub PenpalSudoAcccount: AccountId = get_account_id_from_seed::<sr25519::Public>("Alice"); - pub PenpalAssetOwner: AccountId = PenpalSudoAcccount::get(); + pub PenpalSudoAccount: AccountId = get_account_id_from_seed::<sr25519::Public>("Alice"); + pub PenpalAssetOwner: AccountId = PenpalSudoAccount::get(); } pub fn genesis(para_id: u32) -> Storage { @@ -66,7 +66,7 @@ pub fn genesis(para_id: u32) -> Storage { safe_xcm_version: Some(SAFE_XCM_VERSION), ..Default::default() }, - sudo: penpal_runtime::SudoConfig { key: Some(PenpalSudoAcccount::get()) }, + sudo: penpal_runtime::SudoConfig { key: Some(PenpalSudoAccount::get()) }, assets: penpal_runtime::AssetsConfig { assets: vec![( penpal_runtime::xcm_config::TELEPORTABLE_ASSET_ID, diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 651b3a52306767446943d43a6616193ca8205838..0b49c7a3e091a615632728b6ba74ca4dffefae66 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -14,7 +14,7 @@ // limitations under the License. 
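
The restored `get_account_id_from_seed::<sr25519::Public>("Alice")` calls above use the conventional Substrate dev-account helper. A sketch of how such a helper is typically defined, assuming the usual `sp_core`/`sp_runtime` APIs and a `MultiSignature`-based `AccountId`:

    use sp_core::{sr25519, Pair, Public};
    use sp_runtime::{
        traits::{IdentifyAccount, Verify},
        MultiSignature,
    };

    type AccountPublic = <MultiSignature as Verify>::Signer;
    type AccountId = <AccountPublic as IdentifyAccount>::AccountId;

    /// Derive a public key from a dev seed such as "Alice" (i.e. "//Alice").
    fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
        TPublic::Pair::from_string(&format!("//{}", seed), None)
            .expect("static values are valid; qed")
            .public()
    }

    /// Derive the AccountId for a dev seed,
    /// e.g. get_account_id_from_seed::<sr25519::Public>("Alice").
    fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
    where
        AccountPublic: From<<TPublic::Pair as Pair>::Public>,
    {
        AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
    }
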
mod genesis; -pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAcccount, ED, PARA_ID_A, PARA_ID_B}; +pub use genesis::{genesis, PenpalAssetOwner, PenpalSudoAccount, ED, PARA_ID_A, PARA_ID_B}; pub use penpal_runtime::xcm_config::{ CustomizableAssetFromSystemAssetHub, LocalTeleportableToAssetHub, XcmConfig, }; diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index 2d27426cca7599e3284caa6c8ca69a2d546a8da2..7ac65b0ee1ded60939072ff61d6886fa53a10b94 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -25,5 +25,5 @@ rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococ rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index abc40c2040681cc4cb41246a9872d557134b6dde..12a3ad60e0e046f1881d322118d522da3f9b52b1 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -11,7 +11,6 @@ publish = false workspace = true [dependencies] - # Substrate sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } @@ -27,5 +26,5 @@ westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/west westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 5fc08dff32c454d053816f547ef90ec9b70e8b0c..618c3addc5d0c67c3954610425345d3ec8b2f36b 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -114,7 +114,7 @@ where .expect("Bridge message does not exist") .into(); let payload = Vec::::decode(&mut &encoded_payload[..]) - .expect("Decodign XCM message failed"); + .expect("Decoding XCM message failed"); let id: u32 = LaneIdWrapper(*lane).into(); let message = BridgeMessage { id, nonce, payload }; @@ -265,7 +265,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { $crate::impls::assert_expected_events!( Self, vec![ - // XCM is successfully received and proccessed + // XCM is successfully received and processed [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { origin: $crate::impls::AggregateMessageOrigin::Ump($crate::impls::UmpQueueId::Para(id)), weight_used, @@ -343,7 +343,7 @@ macro_rules! 
impl_hrmp_channels_helpers_for_relay_chain { ::Runtime, >::contains_key(&channel_id); - // Check the HRMP channel has been successfully registrered + // Check the HRMP channel has been successfully registered assert!(hrmp_channel_exist) }); } @@ -362,7 +362,7 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { recipient: $crate::impls::ParaId, call: $crate::impls::DoubleEncoded<()> ) { - use $crate::impls::{bx, Chain, RelayChain}; + use $crate::impls::{bx, Chain, RelayChain, Encode}; ::execute_with(|| { let root_origin = ::RuntimeOrigin::root(); @@ -370,10 +370,10 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); // Send XCM `Transact` - $crate::impls::assert_ok!(]>::XcmPallet::send( + $crate::impls::assert_ok!(]>::XcmPallet::send_blob( root_origin, bx!(destination.into()), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); Self::assert_xcm_pallet_sent(); }); diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 40204ca297a0a2626e4c3946a090199c3269e7e4..cbde0642f1a2965579196cda7e7ada4f291e9d98 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -67,7 +67,7 @@ parameter_types! { xcm::v3::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), ] ); - pub PenpalSiblingSovereigAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); + pub PenpalSiblingSovereignAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); } /// Helper function to generate a crypto pair from seed diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index 6951de6faa728d1ea88bbbc4cfb7f7683ab14935..6f6bbe41e01bd208ee6d40a9f3b4ba8f98f7975b 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -115,7 +115,7 @@ macro_rules! 
test_parachain_is_trusted_teleporter { let para_receiver_balance_after = <$receiver_para as $crate::macros::Chain>::account_data_of(receiver.clone()).free; let delivery_fees = <$sender_para>::execute_with(|| { - $crate::macros::asset_test_utils::xcm_helpers::transfer_assets_delivery_fees::< + $crate::macros::asset_test_utils::xcm_helpers::teleport_assets_delivery_fees::< <$sender_xcm_config as xcm_executor::Config>::XcmSender, >($assets.clone(), fee_asset_item, weight_limit.clone(), beneficiary, para_destination) }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 13eb7d8dfc49a7010f61c2f106c508a7f9bae031..9b519da4b1d82d5536e21f001cced0cec37197a8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -30,7 +30,7 @@ rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["rococo"] } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 21d858f1fe514b8005c968f670f7ff05714d2f79..a5a4914e21d826ea6c70af4ae31a0d4dee43ef64 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -63,17 +63,13 @@ mod imports { // Runtimes pub use asset_hub_rococo_runtime::xcm_config::{ - TokenLocation as RelayLocation, UniversalLocation as AssetHubRococoUniversalLocation, - XcmConfig as AssetHubRococoXcmConfig, + TokenLocation as RelayLocation, XcmConfig as AssetHubRococoXcmConfig, }; pub use penpal_runtime::xcm_config::{ LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, XcmConfig as PenpalRococoXcmConfig, - }; - pub use rococo_runtime::xcm_config::{ - UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig, }; + pub use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 705c9613b647a03c444fa304d69cb046d1df20b5..a0738839087a51e87df3187ac6f06d2889cce64e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -524,7 +524,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let destination = 
Rococo::child_location_of(PenpalA::para_id()); let sender = RococoSender::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; - let assets: Assets = (Here, amount_to_send).into(); // Init values fot Parachain let relay_native_asset_location = @@ -552,15 +551,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = Rococo::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &RococoUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -568,8 +558,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { >::balance(relay_native_asset_location.into(), &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -595,7 +585,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for Relay @@ -634,15 +624,6 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.set_dispatchable::(para_to_relay_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -650,8 +631,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -705,16 +686,6 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = AssetHubRococo::execute_with(|| { - let reanchored_assets = assets - .reanchored(&destination, &AssetHubRococoUniversalLocation::get()) - .unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ 
-722,8 +693,8 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { >::balance(system_para_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's assets is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; @@ -738,7 +709,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { // Init values for Parachain let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); let sender = PenpalASender::get(); - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; + let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; let assets: Assets = (Parent, amount_to_send).into(); let system_para_native_asset_location = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); @@ -749,7 +720,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { ::RuntimeOrigin::signed(asset_owner), system_para_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for System Parachain @@ -788,15 +759,6 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -804,8 +766,8 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -1084,13 +1046,13 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // fund the Parachain Origin's SA on Relay Chain with the native tokens held in reserve Rococo::fund_accounts(vec![(sov_of_sender_on_relay.into(), amount_to_send * 2)]); - // Init values for Parachain Desitnation + // Init values for Parachain Destination let receiver = PenpalBReceiver::get(); // Init Test @@ -1118,13 +1080,6 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let 
sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1135,8 +1090,8 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { >::balance(relay_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index 364fbd0d439f62ed1fce356d1935331ec8e0d90b..1d120f1dc4c7ed4f923514e0692f3a4beb48103a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -75,10 +75,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index 87f0b3d9f90a6f57d61a6352233db4858ba64ca3..e13300b7c11426416f543c2ee026702277b695b1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -370,10 +370,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( penpal_root, bx!(asset_hub_location), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 0cc5ddb9f64d31c93fb73a6bbacf1844b5e161f6..1cbb7fb8c193accc65ef160a3a09514bd51debf5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -245,16 +245,6 @@ fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { ) } -fn relay_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ 
-266,16 +256,6 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn para_to_system_para_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { ::PolkadotXcm::transfer_assets( t.signed_origin, @@ -322,7 +302,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -369,7 +349,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -410,7 +390,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -421,129 +401,6 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -/// Teleport of native asset from Relay Chain to the System Parachain should work -#[test] -fn teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = ROCOCO_ED * 1000; - let dest = Rococo::child_location_of(AssetHubRococo::para_id()); - let beneficiary_id = AssetHubRococoReceiver::get(); - let test_args = TestContext { - sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_teleport_assets); - test.assert(); - - let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachains to the Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - 
teleport_native_assets_from_relay_to_system_para_works(); - - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachain to Relay Chain -/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_from_system_para_to_relay_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions_fail); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance does not change - assert_eq!(receiver_balance_after, receiver_balance_before); -} - #[test] fn teleport_to_other_system_parachains_works() { let amount = ASSET_HUB_ROCOCO_ED * 100; @@ -593,7 +450,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ::RuntimeOrigin::signed(asset_owner.clone()), system_para_native_asset_location, sender.clone(), - fee_amount_to_send, + fee_amount_to_send * 2, ); // No need to create the asset (only mint) as it exists in genesis. 
PenpalA::mint_asset( diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 8ac8efb5218fbf4f9871c2861fdf923b273abedf..3121ed028eb9184cd4e2359e78f17ce6a650c22d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -31,12 +31,12 @@ pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-fe westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["westend"] } penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../../../pallets/xcmp-queue" } -cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 3f899d1dbdbcb77174bfec068d0257b63e3c0a48..c9f5fe0647e12ba0121261505e27ff56c3f82f96 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -67,17 +67,13 @@ mod imports { // Runtimes pub use asset_hub_westend_runtime::xcm_config::{ - UniversalLocation as AssetHubWestendUniversalLocation, WestendLocation as RelayLocation, - XcmConfig as AssetHubWestendXcmConfig, + WestendLocation as RelayLocation, XcmConfig as AssetHubWestendXcmConfig, }; pub use penpal_runtime::xcm_config::{ LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, - UniversalLocation as PenpalUniversalLocation, XcmConfig as PenpalWestendXcmConfig, - }; - pub use westend_runtime::xcm_config::{ - UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, }; + pub use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 8c836132b5469de948fb446999cb2e6f20f11135..a26dfef8e8e702ee3f22870116adc03ee8ed1ca2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -215,7 +215,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { let system_para_native_asset_location = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); let reservable_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("coversion works"); + v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8799))); assert_expected_events!( PenpalA, @@ -246,7 +246,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { fn system_para_to_para_assets_receiver_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; let system_para_asset_location = - v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("coversion works"); + v3::Location::try_from(PenpalLocalReservableFromAssetHub::get()).expect("conversion works"); PenpalA::assert_xcmp_queue_success(None); assert_expected_events!( PenpalA, @@ -524,7 +524,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let destination = Westend::child_location_of(PenpalA::para_id()); let sender = WestendSender::get(); let amount_to_send: Balance = WESTEND_ED * 1000; - let assets: Assets = (Here, amount_to_send).into(); // Init values fot Parachain let relay_native_asset_location = @@ -552,15 +551,6 @@ fn reserve_transfer_native_asset_from_relay_to_para() { test.set_dispatchable::(relay_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = Westend::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &WestendUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -568,8 +558,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { >::balance(relay_native_asset_location.into(), &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -595,7 +585,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for Relay @@ -634,15 +624,6 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.set_dispatchable::(para_to_relay_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets 
= ::ForeignAssets; @@ -650,8 +631,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's asset balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -705,16 +686,6 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = AssetHubWestend::execute_with(|| { - let reanchored_assets = assets - .reanchored(&destination, &AssetHubWestendUniversalLocation::get()) - .unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_balance_after = test.sender.balance; let receiver_assets_after = PenpalA::execute_with(|| { @@ -722,8 +693,8 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { >::balance(system_para_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); // Receiver's assets is increased assert!(receiver_assets_after > receiver_assets_before); // Receiver's assets increased by `amount_to_send - delivery_fees - bought_execution`; @@ -749,7 +720,7 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { ::RuntimeOrigin::signed(asset_owner), system_para_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // Init values for System Parachain @@ -789,15 +760,6 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { test.set_dispatchable::(para_to_system_para_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - let reanchored_assets = - assets.reanchored(&destination, &PenpalUniversalLocation::get()).unwrap(); - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(reanchored_assets, 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -805,8 +767,8 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { }); let receiver_balance_after = test.receiver.balance; - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; @@ -1086,13 +1048,13 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { ::RuntimeOrigin::signed(asset_owner), relay_native_asset_location, sender.clone(), - amount_to_send, + amount_to_send * 2, ); // fund the Parachain 
Origin's SA on Relay Chain with the native tokens held in reserve Westend::fund_accounts(vec![(sov_of_sender_on_relay.into(), amount_to_send * 2)]); - // Init values for Parachain Desitnation + // Init values for Parachain Destination let receiver = PenpalBReceiver::get(); // Init Test @@ -1120,13 +1082,6 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { test.set_dispatchable::(para_to_para_through_relay_limited_reserve_transfer_assets); test.assert(); - // Calculate delivery fees - let delivery_fees = PenpalA::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - // Query final balances let sender_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; @@ -1137,8 +1092,8 @@ fn reserve_transfer_native_asset_from_para_to_para_trough_relay() { >::balance(relay_native_asset_location, &receiver) }); - // Sender's balance is reduced - assert_eq!(sender_assets_before - amount_to_send - delivery_fees, sender_assets_after); + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_assets_after < sender_assets_before - amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index eb0e985cc0ce6f84318f763cb37e707beaeca718..f218b539c387988f70314eff41c2e1ce4e97092b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -75,10 +75,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( root_origin, bx!(system_para_destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 04740d3115834831356fd6dabf6a85ac5784270f..aa673c03483af13cae2ac146049399644b265b6b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -369,10 +369,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send( + assert_ok!(::PolkadotXcm::send_blob( penpal_root, bx!(asset_hub_location), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 
61f547fe7c5ec9b4368b9af5b17dbdf009177cda..ac518d2ed4a445836364a23df313b319b8193e78 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -245,16 +245,6 @@ fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { ) } -fn relay_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -266,16 +256,6 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn para_to_system_para_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { ::PolkadotXcm::transfer_assets( t.signed_origin, @@ -322,7 +302,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -369,7 +349,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -410,7 +390,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -421,129 +401,6 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -/// Teleport of native asset from Relay Chain to the System Parachain should work -#[test] -fn teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let dest = Westend::child_location_of(AssetHubWestend::para_id()); - let beneficiary_id = AssetHubWestendReceiver::get(); - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_teleport_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - 
xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachains to the Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - teleport_native_assets_from_relay_to_system_para_works(); - - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Teleport of native asset from System Parachain to Relay Chain -/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` -#[test] -fn teleport_native_assets_from_system_para_to_relay_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions_fail); - test.set_dispatchable::(system_para_teleport_assets); - test.assert(); - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - 
- // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance does not change - assert_eq!(receiver_balance_after, receiver_balance_before); -} - #[test] fn teleport_to_other_system_parachains_works() { let amount = ASSET_HUB_WESTEND_ED * 100; @@ -593,7 +450,7 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { ::RuntimeOrigin::signed(asset_owner.clone()), system_para_native_asset_location, sender.clone(), - fee_amount_to_send, + fee_amount_to_send * 2, ); // No need to create the asset (only mint) as it exists in genesis. PenpalA::mint_asset( diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 89f0d2a9ca6dacae72e73d5b6e8c310347389070..010c252658c06b8c8da706e42a3c40eae1b854e9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } hex-literal = "0.4.1" # Substrate @@ -34,10 +34,10 @@ pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["rococo"] } cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } +bridge-hub-rococo-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } rococo-system-emulated-network = { path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index a1d871cdb618fdddfbbbc3e7812d0ec7f7ae7866..4bd041dc03f4216c9eddf811d325e8262873e473 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -14,6 +14,7 @@ // limitations under the License. 
use crate::tests::*; +use codec::Encode; #[test] fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable() { @@ -26,7 +27,7 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: WestendId.into(), @@ -38,10 +39,10 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 1e74d63e1d5235cc0e7bfd0d07f75655fdb3e5a5..780ba57f78a18c87c327db2acc5cd27442a0221b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -20,17 +20,17 @@ use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; use rococo_system_emulated_network::penpal_emulated_chain::CustomizableAssetFromSystemAssetHub; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; -use snowbridge_core::outbound::OperatingMode; +use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode}; use snowbridge_pallet_inbound_queue_fixtures::{ - register_token::make_register_token_message, - register_token_with_insufficient_fee::make_register_token_with_infufficient_fee_message, - send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message, - InboundQueueFixture, + register_token::make_register_token_message, send_token::make_send_token_message, + send_token_to_penpal::make_send_token_to_penpal_message, }; use snowbridge_pallet_system; -use snowbridge_router_primitives::inbound::GlobalConsensusEthereumConvertsFor; +use snowbridge_router_primitives::inbound::{ + Command, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, +}; use sp_core::H256; -use sp_runtime::{ArithmeticError::Underflow, DispatchError::Arithmetic}; +use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; const INITIAL_FUND: u128 = 5_000_000_000 * ROCOCO_ED; @@ -39,6 +39,7 @@ const TREASURY_ACCOUNT: [u8; 32] = hex!("6d6f646c70792f74727372790000000000000000000000000000000000000000"); const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); +const INSUFFICIENT_XCM_FEE: u128 = 1000; #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum ControlCall { @@ -56,13 +57,11 @@ pub enum SnowbridgeControl { } pub fn send_inbound_message(fixture: InboundQueueFixture) -> DispatchResult { - EthereumBeaconClient::store_execution_header( - fixture.message.proof.block_hash, - fixture.execution_header, - 0, - H256::default(), - ); - + EthereumBeaconClient::store_finalized_header( + fixture.finalized_header, + 
fixture.block_roots_root, + ) + .unwrap(); EthereumInboundQueue::submit( RuntimeOrigin::signed(BridgeHubRococoSender::get()), fixture.message, @@ -83,7 +82,7 @@ fn create_agent() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let remote_xcm = VersionedXcm::from(Xcm(vec![ + let remote_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -96,10 +95,10 @@ fn create_agent() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(remote_xcm), + remote_xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; @@ -141,7 +140,7 @@ fn create_channel() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let create_agent_xcm = VersionedXcm::from(Xcm(vec![ + let create_agent_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -154,7 +153,7 @@ fn create_channel() { let create_channel_call = SnowbridgeControl::Control(ControlCall::CreateChannel { mode: OperatingMode::Normal }); // Construct XCM to create a channel for para 1001 - let create_channel_xcm = VersionedXcm::from(Xcm(vec![ + let create_channel_xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -167,16 +166,16 @@ fn create_channel() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin.clone(), bx!(destination.clone()), - bx!(create_agent_xcm), + create_agent_xcm.encode().try_into().unwrap(), )); - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(create_channel_xcm), + create_channel_xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; @@ -237,6 +236,46 @@ fn register_weth_token_from_ethereum_to_asset_hub() { }); } +/// Tests the registering of a token as an asset on AssetHub, and then subsequently sending +/// a token from Ethereum to AssetHub. +#[test] +fn send_token_from_ethereum_to_asset_hub() { + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); + + // Fund ethereum sovereign on AssetHub + AssetHubRococo::fund_accounts(vec![(AssetHubRococoReceiver::get(), INITIAL_FUND)]); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Construct RegisterToken message and sent to inbound queue + send_inbound_message(make_register_token_message()).unwrap(); + + // Construct SendToken message and sent to inbound queue + send_inbound_message(make_send_token_message()).unwrap(); + + // Check that the message was sent + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the token was received and issued as a foreign asset on AssetHub + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + }); +} + /// Tests sending a token to a 3rd party parachain, called PenPal. The token reserve is /// still located on AssetHub. #[test] @@ -296,6 +335,10 @@ fn send_token_from_ethereum_to_penpal() { // Construct RegisterToken message and sent to inbound queue send_inbound_message(make_register_token_message()).unwrap(); + // Construct a SendToken message to AssetHub (only to bump the nonce so the message order + // matches the smoke test) + send_inbound_message(make_send_token_message()).unwrap(); + // Construct SendToken message and sent to inbound queue send_inbound_message(make_send_token_to_penpal_message()).unwrap(); @@ -331,46 +374,6 @@ fn send_token_from_ethereum_to_penpal() { }); } -/// Tests the registering of a token as an asset on AssetHub, and then subsequently sending -/// a token from Ethereum to AssetHub. -#[test] -fn send_token_from_ethereum_to_asset_hub() { - BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); - - // Fund ethereum sovereign on AssetHub - AssetHubRococo::fund_accounts(vec![(AssetHubRococoReceiver::get(), INITIAL_FUND)]); - - BridgeHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Construct RegisterToken message and sent to inbound queue - send_inbound_message(make_register_token_message()).unwrap(); - - // Construct SendToken message and sent to inbound queue - send_inbound_message(make_send_token_message()).unwrap(); - - // Check that the message was sent - assert_expected_events!( - BridgeHubRococo, - vec![ - RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, - ] - ); - }); - - AssetHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - // Check that the token was received and issued as a foreign asset on AssetHub - assert_expected_events!( - AssetHubRococo, - vec![ - RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { ..
}) => {}, - ] - ); - }); -} - /// Tests the full cycle of token transfers: /// - registering a token on AssetHub /// - sending a token to AssetHub @@ -458,12 +461,13 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { AssetHubRococoReceiver::get(), ); // Send the Weth back to Ethereum - ::PolkadotXcm::reserve_transfer_assets( + ::PolkadotXcm::limited_reserve_transfer_assets( RuntimeOrigin::signed(AssetHubRococoReceiver::get()), Box::new(destination), Box::new(beneficiary), Box::new(multi_assets), 0, + Unlimited, ) .unwrap(); let free_balance_after = ::Balances::free_balance( @@ -506,16 +510,35 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { }); } +#[test] +fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() { + // Insufficient funds + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), 1_000); + + BridgeHubRococo::execute_with(|| { + assert_err!(send_inbound_message(make_register_token_message()), Token(FundsUnavailable)); + }); +} + #[test] fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND); BridgeHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - - // Construct RegisterToken message and sent to inbound queue - let message = make_register_token_with_infufficient_fee_message(); - send_inbound_message(message).unwrap(); + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message_id: H256 = [0; 32].into(); + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::RegisterToken { + token: WETH.into(), + // Insufficient fee, which should trigger the trap + fee: INSUFFICIENT_XCM_FEE, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap(); + let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into()).unwrap(); assert_expected_events!( BridgeHubRococo, @@ -536,13 +559,3 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { ); }); } - -#[test] -fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() { - // Insufficient fund - BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), 1_000); - - BridgeHubRococo::execute_with(|| { - assert_err!(send_inbound_message(make_register_token_message()), Arithmetic(Underflow)); - }); -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 9d55903c858308b9382fda0fc03132d95b9f1028..9c45a7adeb4e5a3c112f0bb650e29e9a76a7e404 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,6 +11,7 @@ publish = false workspace = true [dependencies] +codec = { package = "parity-scale-codec", version = "3.6.0" } # Substrate frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } @@ -30,8 +31,8 @@ pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } -bridge-hub-westend-runtime = { path =
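With the `make_register_token_with_infufficient_fee_message` fixture removed, the reworked insufficient-fee test above builds its `VersionedMessage` inline and drives `EthereumInboundQueue::do_convert` and `send_xcm` directly. If more fee variants were ever needed, the construction could be factored out along these lines (a sketch only; the helper is hypothetical and not part of this patch):

use snowbridge_router_primitives::inbound::{Command, MessageV1, VersionedMessage};

// Hypothetical helper: build a RegisterToken message with an arbitrary fee so the
// happy path and the insufficient-fee path share one construction site.
// `CHAIN_ID` and `WETH` are assumed in scope, as in the test module above.
fn register_token_message(fee: u128) -> VersionedMessage {
    VersionedMessage::V1(MessageV1 {
        chain_id: CHAIN_ID,
        command: Command::RegisterToken { token: WETH.into(), fee },
    })
}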
"../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } +bridge-hub-westend-runtime = { path = "../../../../../runtimes/bridge-hubs/bridge-hub-westend", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index b01be5e8dc84b4edf35651d0388baa1462b54c9b..f69747c17704cb47e11ec00e2d8a08a413fab0a4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -14,6 +14,7 @@ // limitations under the License. use crate::tests::*; +use codec::Encode; #[test] fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable() { @@ -26,7 +27,7 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm(vec![ + let xcm = VersionedXcm::from(Xcm::<()>(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: RococoId, @@ -38,10 +39,10 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable // Westend Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Westend::execute_with(|| { - assert_ok!(::XcmPallet::send( + assert_ok!(::XcmPallet::send_blob( sudo_origin, bx!(destination), - bx!(xcm), + xcm.encode().try_into().unwrap(), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 609376c1fee606c6d3d284c0be8f5c30997acfb6..1570aa7662fcbbfe1b018251d2700ce2d653d165 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -26,7 +26,7 @@ polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } people-rococo-runtime = { path = "../../../../../runtimes/people/people-rococo" } emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-system-emulated-network = { path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 3abe5c6cf66a357a5dfcaa67649355846761bfd2..350d87d638ab25bd66a411a67fa3f109bdaffff9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -155,7 +155,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Rococo::execute_with(|| { - 
xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -204,7 +204,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -248,7 +248,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index f2f3366798a0aacc77fe279efd1b47032c62c7dc..bc093dc0de6356d7cd98d20d12d63748ed248ff5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -26,7 +26,7 @@ polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } -parachains-common = { path = "../../../../../../parachains/common" } +parachains-common = { path = "../../../../../common" } people-westend-runtime = { path = "../../../../../runtimes/people/people-westend" } emulated-integration-tests-common = { path = "../../../common", default-features = false } westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index eef35a99a8331d0b84dcd8d3f8e848855fc56d41..8697477ba769329755b40a87132c62b213861cc4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -155,7 +155,7 @@ fn limited_teleport_native_assets_from_relay_to_system_para_works() { test.assert(); let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -204,7 +204,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< + xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); @@ -248,7 +248,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let receiver_balance_after = test.receiver.balance; let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< 
+ xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 691be02f5b8e390bf150713bcf127af1e2ce44b0..b3fac47cb4ae2d78bdf7431d877a4a6edeb34ebb 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -11,9 +11,9 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", optional = true, default-features = false } +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 0e2f965e1cffb114729b16612b12b146ce61e0da..17981d238fd1880e81ce04ebbe5b2a045ba27e41 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 1afd55eb0b9273d37d025eb3520c9dd8be122048..15169b08b9108998d850f6333e458d73580d1b2a 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 95691a045b72284c5d42218f4346b0dbb9e62e8f..0733156716c11a6e1e50c9d921f059a2e2c1b193 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { 
version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -78,6 +78,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -86,10 +87,10 @@ assets-common = { path = "../common", default-features = false } # Bridges pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } snowbridge-router-primitives = { path = "../../../../../bridges/snowbridge/primitives/router", default-features = false } [dev-dependencies] @@ -190,6 +191,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 689d8d56c48ba9fd4bbb129a7c1d0f8eec5ec426..7edec45abfbb2ee42d9e532498b887ea2b3c0e11 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -660,6 +660,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} @@ -954,6 +955,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + 
cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index 51b6543bae82ba496998706ab6c2aaf6e0ff604b..e0e231d7da279022293d42aa5e3b3fc1b8ad12d5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 22_136_000 picoseconds. - Weight::from_parts(22_518_000, 0) + // Minimum execution time: 21_224_000 picoseconds. + Weight::from_parts(21_821_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 21_474_000 picoseconds. + Weight::from_parts(22_072_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 92_277_000 picoseconds. - Weight::from_parts(94_843_000, 0) + // Minimum execution time: 90_677_000 picoseconds. 
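For context on the `StorageWeightReclaim` additions in this patch: it is a signed extension appended at the end of each runtime's `SignedExtra` tuple, and after dispatch it compares the proof-size weight that was pre-charged against the proof size actually recorded, refunding any overestimate. The resulting tuple shape, abridged (generics elided, as elsewhere in this diff):

pub type SignedExtra = (
    // ...the runtime's existing extensions (era, nonce, weight and payment checks)...
    frame_system::CheckNonce,
    frame_system::CheckWeight,
    pallet_asset_conversion_tx_payment::ChargeAssetTxPayment,
    // Appended last, as this patch does in each runtime, so the reclaim sees the
    // final proof-size accounting.
    cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim,
);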
+ Weight::from_parts(93_658_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 120_110_000 picoseconds. - Weight::from_parts(122_968_000, 0) + // Minimum execution time: 116_767_000 picoseconds. + Weight::from_parts(118_843_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 143_116_000 picoseconds. - Weight::from_parts(147_355_000, 0) + // Minimum execution time: 137_983_000 picoseconds. + Weight::from_parts(141_396_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -164,14 +186,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_517_000 picoseconds. - Weight::from_parts(6_756_000, 0) + // Minimum execution time: 6_232_000 picoseconds. + Weight::from_parts(6_507_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,8 +213,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_894_000 picoseconds. - Weight::from_parts(2_024_000, 0) + // Minimum execution time: 1_884_000 picoseconds. + Weight::from_parts(2_016_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -208,8 +240,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_314_000 picoseconds. - Weight::from_parts(28_787_000, 0) + // Minimum execution time: 26_637_000 picoseconds. + Weight::from_parts(27_616_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -234,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_840_000 picoseconds. - Weight::from_parts(30_589_000, 0) + // Minimum execution time: 28_668_000 picoseconds. 
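The `18_446_744_073_709_551_000` picoseconds reported for `execute_blob` above is not a measurement: the `Storage: Benchmark::Override` annotation means the benchmark is marked as overridden, and the generated file carries `u64::MAX` truncated to whole nanoseconds as a deliberately unusable placeholder. A quick check of that arithmetic:

fn main() {
    // u64::MAX (18_446_744_073_709_551_615) rounded down to a multiple of 1_000,
    // i.e. whole nanoseconds expressed in picoseconds.
    let sentinel = u64::MAX / 1_000 * 1_000;
    assert_eq!(sentinel, 18_446_744_073_709_551_000);
}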
+ Weight::from_parts(29_413_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -246,8 +278,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_893_000 picoseconds. - Weight::from_parts(2_017_000, 0) + // Minimum execution time: 1_990_000 picoseconds. + Weight::from_parts(2_114_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -257,8 +289,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 19_211_000 picoseconds. - Weight::from_parts(19_552_000, 0) + // Minimum execution time: 18_856_000 picoseconds. + Weight::from_parts(19_430_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -269,8 +301,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 19_177_000 picoseconds. - Weight::from_parts(19_704_000, 0) + // Minimum execution time: 19_068_000 picoseconds. + Weight::from_parts(19_434_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -281,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 20_449_000 picoseconds. - Weight::from_parts(21_075_000, 0) + // Minimum execution time: 21_055_000 picoseconds. + Weight::from_parts(21_379_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -304,8 +336,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_578_000 picoseconds. - Weight::from_parts(27_545_000, 0) + // Minimum execution time: 25_736_000 picoseconds. + Weight::from_parts(26_423_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -316,8 +348,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_646_000 picoseconds. - Weight::from_parts(11_944_000, 0) + // Minimum execution time: 11_853_000 picoseconds. + Weight::from_parts(12_215_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -327,8 +359,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 19_301_000 picoseconds. - Weight::from_parts(19_664_000, 0) + // Minimum execution time: 19_418_000 picoseconds. + Weight::from_parts(19_794_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -351,8 +383,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 35_715_000 picoseconds. - Weight::from_parts(36_915_000, 0) + // Minimum execution time: 34_719_000 picoseconds. 
+ Weight::from_parts(35_260_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -365,8 +397,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_871_000 picoseconds. - Weight::from_parts(5_066_000, 0) + // Minimum execution time: 4_937_000 picoseconds. + Weight::from_parts(5_203_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -377,8 +409,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_150_000 picoseconds. - Weight::from_parts(26_119_000, 0) + // Minimum execution time: 26_064_000 picoseconds. + Weight::from_parts(26_497_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -389,8 +421,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 38_248_000 picoseconds. - Weight::from_parts(39_122_000, 0) + // Minimum execution time: 37_132_000 picoseconds. + Weight::from_parts(37_868_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index c438361cc177eabe2bfc9056621d21e25db42db1..f4ff985e2773f02c17c75dbf3fcf78dd6aa0ada2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -192,7 +192,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. 
CheckingAccount, @@ -286,8 +286,8 @@ impl Contains for SafeCallFilter { match call { RuntimeCall::System(frame_system::Call::set_storage { items }) if items.iter().all(|(k, _)| { - k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) | - k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) | + k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) || + k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) || k.eq(&bridging::to_ethereum::BridgeHubEthereumBaseFee::key()) }) => return true, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 950c6e62d72fa0c23544456fce76b49b1d5d3766..e25554ec0a5f1af99b86bce2c3c1f60f256aa53c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -76,6 +76,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -84,10 +85,10 @@ assets-common = { path = "../common", default-features = false } # Bridges pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } [dev-dependencies] hex-literal = "0.4.1" @@ -178,6 +179,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", 
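The `SafeCallFilter` change above replaces bitwise `|` with lazy `||`. On `bool` both operators compile and agree on the result, but `|` always evaluates both operands (so every chained `key()` computation ran), while `||` stops at the first `true`. A small self-contained illustration:

use std::cell::Cell;

fn main() {
    let calls = Cell::new(0);
    let expensive = |v: bool| {
        calls.set(calls.get() + 1); // count how often an operand is evaluated
        v
    };

    // Bitwise `|` on bools: both sides are always evaluated.
    let _ = expensive(true) | expensive(false);
    assert_eq!(calls.get(), 2);

    // Lazy `||`: the right side is skipped once the left is `true`.
    calls.set(0);
    let _ = expensive(true) || expensive(false);
    assert_eq!(calls.get(), 1);
}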
"cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 48106b5f302d45bc7d4af104f9bb47e51854ab76..d17d5a707579d6df527adb0f625b225d95c5d385 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -641,6 +641,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} @@ -926,6 +927,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 71facff87d35f350114850653acfbebeb5c29529..a36c25f96043dd9d2b183ff45c3030315bd5bd19 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-f3xfxtob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_630_000 picoseconds. - Weight::from_parts(22_306_000, 0) + // Minimum execution time: 21_050_000 picoseconds. 
+ Weight::from_parts(21_834_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 21_164_000 picoseconds. + Weight::from_parts(21_656_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 91_802_000 picoseconds. - Weight::from_parts(93_672_000, 0) + // Minimum execution time: 92_497_000 picoseconds. + Weight::from_parts(95_473_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `367` // Estimated: `6196` - // Minimum execution time: 118_930_000 picoseconds. - Weight::from_parts(122_306_000, 0) + // Minimum execution time: 120_059_000 picoseconds. + Weight::from_parts(122_894_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 140_527_000 picoseconds. - Weight::from_parts(144_501_000, 0) + // Minimum execution time: 141_977_000 picoseconds. + Weight::from_parts(145_981_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -158,8 +180,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_556_000 picoseconds. - Weight::from_parts(7_798_000, 0) + // Minimum execution time: 7_426_000 picoseconds. + Weight::from_parts(7_791_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_585_000 picoseconds. 
+ Weight::from_parts(7_897_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -168,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_373_000 picoseconds. - Weight::from_parts(6_603_000, 0) + // Minimum execution time: 6_224_000 picoseconds. + Weight::from_parts(6_793_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_941_000 picoseconds. - Weight::from_parts(2_088_000, 0) + // Minimum execution time: 1_812_000 picoseconds. + Weight::from_parts(2_008_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -206,8 +236,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_080_000 picoseconds. - Weight::from_parts(27_820_000, 0) + // Minimum execution time: 26_586_000 picoseconds. + Weight::from_parts(27_181_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -232,8 +262,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 28_850_000 picoseconds. - Weight::from_parts(29_506_000, 0) + // Minimum execution time: 28_295_000 picoseconds. + Weight::from_parts(29_280_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -244,8 +274,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_033_000 picoseconds. - Weight::from_parts(2_201_000, 0) + // Minimum execution time: 1_803_000 picoseconds. + Weight::from_parts(1_876_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -255,8 +285,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 18_844_000 picoseconds. - Weight::from_parts(19_197_000, 0) + // Minimum execution time: 18_946_000 picoseconds. + Weight::from_parts(19_456_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -267,8 +297,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 18_940_000 picoseconds. - Weight::from_parts(19_450_000, 0) + // Minimum execution time: 19_080_000 picoseconds. + Weight::from_parts(19_498_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -279,8 +309,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 20_521_000 picoseconds. - Weight::from_parts(21_076_000, 0) + // Minimum execution time: 20_637_000 picoseconds. 
+ Weight::from_parts(21_388_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -302,8 +332,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_007_000 picoseconds. - Weight::from_parts(26_448_000, 0) + // Minimum execution time: 25_701_000 picoseconds. + Weight::from_parts(26_269_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -314,8 +344,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_584_000 picoseconds. - Weight::from_parts(12_080_000, 0) + // Minimum execution time: 11_949_000 picoseconds. + Weight::from_parts(12_249_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 19_157_000 picoseconds. - Weight::from_parts(19_513_000, 0) + // Minimum execution time: 19_278_000 picoseconds. + Weight::from_parts(19_538_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -349,8 +379,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 34_878_000 picoseconds. - Weight::from_parts(35_623_000, 0) + // Minimum execution time: 35_098_000 picoseconds. + Weight::from_parts(35_871_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -363,8 +393,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 3_900_000 picoseconds. - Weight::from_parts(4_161_000, 0) + // Minimum execution time: 3_862_000 picoseconds. + Weight::from_parts(4_082_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -375,8 +405,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_731_000 picoseconds. - Weight::from_parts(26_160_000, 0) + // Minimum execution time: 25_423_000 picoseconds. + Weight::from_parts(25_872_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -387,8 +417,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 37_251_000 picoseconds. - Weight::from_parts(38_075_000, 0) + // Minimum execution time: 37_148_000 picoseconds. 
+ Weight::from_parts(37_709_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index c993d61545a66c24826c0ab66e20546e3fa752b0..7d3ed650e6bfcd10e91bfe19fb9463b6c8a4714f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -185,7 +185,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. CheckingAccount, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 3ba9b9587d888bd1821751053ecaa19caacf814d..6696cb2322391c2dbad5f6c9a0afc4d5537de68c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -618,7 +618,7 @@ fn test_asset_xcm_take_first_trader_with_refund() { // We actually use half of the weight let weight_used = bought / 2; - // Make sure refurnd works. + // Make sure refund works. let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); assert_eq!( @@ -745,7 +745,7 @@ fn test_that_buying_ed_refund_does_not_refund_for_take_first_trader() { // Buy weight should work assert_ok!(trader.buy_weight(bought, asset.into(), &ctx)); - // Should return None. We have a specific check making sure we dont go below ED for + // Should return None. 
We have a specific check making sure we don't go below ED for // drop payment assert_eq!(trader.refund_weight(bought, &ctx), None); diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index c9252375cfbf019c27a1ce0d5dc38f11db30cac5..12dfd9da1fffbc6d1be0edc40ab6de3ebcc78e20 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } impl-trait-for-tuples = "0.2.2" diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 53e10956bd0d1d233555a8e4df1ae23fe351c678..884b71369e79ad9713ea3cc8e243820862c9c20d 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -186,7 +186,7 @@ pub fn teleports_for_native_asset_works< // Mint funds into account to ensure it has enough balance to pay delivery fees let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( + xcm_helpers::teleport_assets_delivery_fees::( (native_asset_id.clone(), native_asset_to_teleport_away.into()).into(), 0, Unlimited, @@ -579,7 +579,7 @@ pub fn teleports_for_foreign_assets_works< // Make sure the target account has enough native asset to pay for delivery fees let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( + xcm_helpers::teleport_assets_delivery_fees::( (foreign_asset_id_location_latest.clone(), asset_to_teleport_away).into(), 0, Unlimited, @@ -1120,7 +1120,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor AssetId: Clone, AssetIdConverter: MaybeEquivalence, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_asset_id_location = Location::new(1, [Parachain(2222), GeneralIndex(1234567)]); let asset_id = AssetIdConverter::convert(&foreign_asset_id_location).unwrap(); diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 1cce3b647cf0446a2246417b5383594fb501e600..0b2364dbb8bd858a95f6a109c800032cb286412b 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -386,7 +386,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< existential_deposit, ); - // create foreign asset for wrapped/derivated representation + // create foreign asset for wrapped/derived representation assert_ok!( >::force_create( RuntimeHelper::::root_origin(), diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs index f509a3a8acaad9ea2f499c2fa48ca72e0b0882d9..ca0e81fae42eda216f93ed6265dd6dd87f6de538 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/xcm_helpers.rs @@ -18,11 +18,10 @@ use xcm::latest::prelude::*; -/// 
Returns the delivery fees amount for pallet xcm's `teleport_assets` and -/// `reserve_transfer_assets` extrinsics. +/// Returns the delivery fees amount for pallet xcm's `teleport_assets` extrinsic. /// Because it returns only a `u128`, it assumes delivery fees are only paid /// in one asset and that asset is known. -pub fn transfer_assets_delivery_fees( +pub fn teleport_assets_delivery_fees( assets: Assets, fee_asset_item: u32, weight_limit: WeightLimit, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 8a6823ea3ee834e8588d7262500aabd733bcbec2..f5a75aa03acd01381666a30439789387ae699ac9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } @@ -78,6 +78,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f ] } cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -85,20 +86,20 @@ parachains-common = { path = "../../../common", default-features = false } testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-polkadot = { path = "../../../../../bridges/primitives/chain-bridge-hub-polkadot", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-polkadot = { path = "../../../../../bridges/chains/chain-bridge-hub-polkadot", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false }
bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } -bp-polkadot-bulletin = { path = "../../../../../bridges/primitives/chain-polkadot-bulletin", default-features = false } +bp-polkadot-bulletin = { path = "../../../../../bridges/chains/chain-polkadot-bulletin", default-features = false } bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } +bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } +bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } @@ -156,6 +157,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 3980fa0d501a1db10f6cb48bdde87946031be8f4..9796a77f994e47d4e52fe75e8ab74434b97307d7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -100,8 +100,6 @@ use parachains_common::{ AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; -use polkadot_runtime_common::prod_or_fast; - #[cfg(feature = "runtime-benchmarks")] use benchmark_helpers::DoNothingRouter; @@ -132,6 +130,7 @@ pub type SignedExtra = ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -387,6 +386,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} @@ -514,14 +514,14 @@ parameter_types! 
{ pub mod benchmark_helpers { use crate::{EthereumBeaconClient, Runtime, RuntimeOrigin}; use codec::Encode; - use snowbridge_beacon_primitives::CompactExecutionHeader; + use snowbridge_beacon_primitives::BeaconHeader; use snowbridge_pallet_inbound_queue::BenchmarkHelper; use sp_core::H256; use xcm::latest::{Assets, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}; impl BenchmarkHelper for Runtime { - fn initialize_storage(block_hash: H256, header: CompactExecutionHeader) { - EthereumBeaconClient::store_execution_header(block_hash, header, 0, H256::default()) + fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256) { + EthereumBeaconClient::store_finalized_header(beacon_header, block_roots_root).unwrap(); } } @@ -642,14 +642,9 @@ parameter_types! { }; } -parameter_types! { - pub const MaxExecutionHeadersToKeep: u32 = prod_or_fast!(8192 * 2, 1000); -} - impl snowbridge_pallet_ethereum_client::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ForkVersions = ChainForkVersions; - type MaxExecutionHeadersToKeep = MaxExecutionHeadersToKeep; type WeightInfo = weights::snowbridge_pallet_ethereum_client::WeightInfo; } @@ -1499,7 +1494,8 @@ mod tests { ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), - ) + ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); // for BridgeHubRococo diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index a732e1a573439c4b658191697024ac3c396c9de5..adfaa9ea2028e4b8880e17a1ccc471beb64c9a3c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 18_513_000 picoseconds. - Weight::from_parts(19_156_000, 0) + // Minimum execution time: 18_732_000 picoseconds. 
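
The recurring `StorageWeightReclaim` additions all follow one pattern: the extension is appended last in `SignedExtra`, so that after dispatch the actually consumed proof size can be compared against the benchmarked estimate and the surplus weight refunded. A minimal sketch of the resulting tuple, assuming the standard `frame_system` extensions these runtimes already declare:

    // SignedExtra with StorageWeightReclaim as the final extension, mirroring
    // the runtime hunks in this patch.
    pub type SignedExtra = (
        frame_system::CheckNonZeroSender<Runtime>,
        frame_system::CheckSpecVersion<Runtime>,
        frame_system::CheckTxVersion<Runtime>,
        frame_system::CheckGenesis<Runtime>,
        frame_system::CheckEra<Runtime>,
        frame_system::CheckNonce<Runtime>,
        frame_system::CheckWeight<Runtime>,
        pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
        // Refunds over-estimated proof-size weight after dispatch.
        cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
    );

Test helpers build the value with `StorageWeightReclaim::new()`, as the `construct_extrinsic` hunks in this patch show.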
+ Weight::from_parts(19_386_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 18_943_000 picoseconds. + Weight::from_parts(19_455_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_096_000 picoseconds. - Weight::from_parts(89_732_000, 0) + // Minimum execution time: 88_917_000 picoseconds. + Weight::from_parts(91_611_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_239_000 picoseconds. - Weight::from_parts(89_729_000, 0) + // Minimum execution time: 88_587_000 picoseconds. + Weight::from_parts(90_303_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -142,14 +164,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_955_000 picoseconds. - Weight::from_parts(6_266_000, 0) + // Minimum execution time: 5_856_000 picoseconds. 
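
The `18_446_744_073_709_551_000` picoseconds in `execute_blob` is not a measurement: benchmarks that write the `Benchmark::Override` storage marker make the CLI emit u64::MAX (rounded) as a sentinel meaning the generated value must be replaced by hand. An illustrative guard, not part of this patch:

    use frame_support::weights::Weight;

    /// Sentinel the benchmark CLI emits for overridden benchmarks
    /// (u64::MAX is 18_446_744_073_709_551_615; the CLI rounds it down).
    const OVERRIDE_SENTINEL: u64 = 18_446_744_073_709_551_000;

    /// True if a generated weight is the override placeholder rather than
    /// a real measurement.
    fn is_override_placeholder(w: Weight) -> bool {
        w.ref_time() >= OVERRIDE_SENTINEL
    }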
+ Weight::from_parts(6_202_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +191,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(1_961_000, 0) + // Minimum execution time: 1_797_000 picoseconds. + Weight::from_parts(1_970_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +218,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_388_000 picoseconds. - Weight::from_parts(25_072_000, 0) + // Minimum execution time: 24_479_000 picoseconds. + Weight::from_parts(25_058_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +244,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_762_000 picoseconds. - Weight::from_parts(27_631_000, 0) + // Minimum execution time: 27_282_000 picoseconds. + Weight::from_parts(27_924_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,8 +256,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_856_000 picoseconds. - Weight::from_parts(2_033_000, 0) + // Minimum execution time: 1_801_000 picoseconds. + Weight::from_parts(1_988_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -235,8 +267,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 17_718_000 picoseconds. - Weight::from_parts(18_208_000, 0) + // Minimum execution time: 16_509_000 picoseconds. + Weight::from_parts(16_939_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -247,8 +279,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 17_597_000 picoseconds. - Weight::from_parts(18_090_000, 0) + // Minimum execution time: 16_140_000 picoseconds. + Weight::from_parts(16_843_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -259,8 +291,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 19_533_000 picoseconds. - Weight::from_parts(20_164_000, 0) + // Minimum execution time: 18_160_000 picoseconds. + Weight::from_parts(18_948_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -282,8 +314,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 24_958_000 picoseconds. - Weight::from_parts(25_628_000, 0) + // Minimum execution time: 24_409_000 picoseconds. 
+ Weight::from_parts(25_261_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -294,8 +326,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 12_209_000 picoseconds. - Weight::from_parts(12_612_000, 0) + // Minimum execution time: 10_848_000 picoseconds. + Weight::from_parts(11_241_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -305,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_844_000 picoseconds. - Weight::from_parts(18_266_000, 0) + // Minimum execution time: 16_609_000 picoseconds. + Weight::from_parts(17_044_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +361,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 34_131_000 picoseconds. - Weight::from_parts(34_766_000, 0) + // Minimum execution time: 32_500_000 picoseconds. + Weight::from_parts(33_475_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +375,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_525_000 picoseconds. - Weight::from_parts(3_724_000, 0) + // Minimum execution time: 3_484_000 picoseconds. + Weight::from_parts(3_673_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +387,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_975_000 picoseconds. - Weight::from_parts(25_517_000, 0) + // Minimum execution time: 25_225_000 picoseconds. + Weight::from_parts(25_731_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +399,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_761_000 picoseconds. - Weight::from_parts(34_674_000, 0) + // Minimum execution time: 33_961_000 picoseconds. 
+ Weight::from_parts(34_818_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs index 0d5f29c6ff2f21165e45649848bd24664acd7e19..c8017939b627c7500694b6a88d052731b6cf715c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_ethereum_client.rs @@ -126,26 +126,4 @@ impl snowbridge_pallet_ethereum_client::WeightInfo for .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: EthereumBeaconClient LatestFinalizedBlockRoot (r:1 w:0) - /// Proof: EthereumBeaconClient LatestFinalizedBlockRoot (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient FinalizedBeaconState (r:1 w:0) - /// Proof: EthereumBeaconClient FinalizedBeaconState (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient LatestExecutionState (r:1 w:1) - /// Proof: EthereumBeaconClient LatestExecutionState (max_values: Some(1), max_size: Some(80), added: 575, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaderIndex (r:1 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaderIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaderMapping (r:1 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaderMapping (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: EthereumBeaconClient ExecutionHeaders (r:0 w:1) - /// Proof: EthereumBeaconClient ExecutionHeaders (max_values: None, max_size: Some(136), added: 2611, mode: MaxEncodedLen) - fn submit_execution_header() -> Weight { - // Proof Size summary in bytes: - // Measured: `386` - // Estimated: `3537` - // Minimum execution time: 108_761_000 picoseconds. - Weight::from_parts(113_158_000, 0) - .saturating_add(Weight::from_parts(0, 3537)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs index faf404f90cb34dd3825df585bb3221031147bb47..153c1d363be10888601dfa66bdcdf5e88af57001 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/snowbridge_pallet_inbound_queue.rs @@ -58,12 +58,12 @@ impl snowbridge_pallet_inbound_queue::WeightInfo for We /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `457` - // Estimated: `3601` - // Minimum execution time: 69_000_000 picoseconds. 
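
Context for the snowbridge hunks above: the Ethereum light client no longer imports per-block execution headers, hence the removed `submit_execution_header` weights and the dropped `MaxExecutionHeadersToKeep` bound; benchmarks now seed a finalized beacon header instead. The updated helper, restated from the bridge-hub-rococo hunk, with the `BenchmarkHelper<Runtime>` generic assumed here:

    // Benchmarks initialize the light client from a finalized beacon header and
    // the block-roots root rather than storing a compact execution header.
    impl BenchmarkHelper<Runtime> for Runtime {
        fn initialize_storage(beacon_header: BeaconHeader, block_roots_root: H256) {
            EthereumBeaconClient::store_finalized_header(beacon_header, block_roots_root)
                .unwrap();
        }
    }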
- Weight::from_parts(70_000_000, 0) - .saturating_add(Weight::from_parts(0, 3601)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `800` + // Estimated: `7200` + // Minimum execution time: 200_000_000 picoseconds. + Weight::from_parts(200_000_000, 0) + .saturating_add(Weight::from_parts(0, 7200)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index 101b8d86d55790241d364fa50900a22ed0e418ef..5960ab7b55054f228d1e946e5d6e5ff7dd9706ac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -187,6 +187,7 @@ fn construct_extrinsic( OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index fad357b095148fd07db14762b764d29b4a4366fa..776c505fa640fa956ae07e9928ccf2f9293e80dd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -64,6 +64,7 @@ fn construct_extrinsic( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 4eb201eedc115edcdb337534c18a04ac5379ae80..86560caca99ca344065373330638c3232a417f21 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -16,7 +16,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate @@ -71,31 +71,33 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", 
default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["westend"] } # Bridges -bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } -bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } +bp-asset-hub-rococo = { path = "../../../../../bridges/chains/chain-asset-hub-rococo", default-features = false } +bp-asset-hub-westend = { path = "../../../../../bridges/chains/chain-asset-hub-westend", default-features = false } +bp-bridge-hub-rococo = { path = "../../../../../bridges/chains/chain-bridge-hub-rococo", default-features = false } +bp-bridge-hub-westend = { path = "../../../../../bridges/chains/chain-bridge-hub-westend", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } bp-polkadot-core = { path = "../../../../../bridges/primitives/polkadot-core", default-features = false } bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-features = false } bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } -bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } -bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } +bp-rococo = { path = "../../../../../bridges/chains/chain-rococo", default-features = false } +bp-westend = { path = "../../../../../bridges/chains/chain-westend", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", default-features = false } pallet-xcm-bridge-hub = { path = "../../../../../bridges/modules/xcm-bridge-hub", default-features = false } bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } -bridge-hub-common = { path = "../../bridge-hubs/common", default-features = false } +bridge-hub-common = { path = "../common", default-features = false } [dev-dependencies] static_assertions = "1.1" @@ -128,6 +130,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking/std", "frame-executive/std", diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 9bdea6b9a7dd72f7f280b456f6624f12153dbbba..4318df8f15edb5fbe1ad14d04e95e9821d82ccb6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -109,6 +109,7 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -348,6 +349,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} @@ -1153,6 +1155,7 @@ mod tests { ( bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new() ); { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index a78ff2355efaf06562e44828a8df0730481d4098..9cf4c61466a1bd37ffea56681648b2c2e2ce4555 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 19_527_000 picoseconds. - Weight::from_parts(19_839_000, 0) + // Minimum execution time: 19_702_000 picoseconds. 
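
On the repeated `type IdleMaxServiceWeight` additions: `pallet_message_queue` gained this config item so queue servicing may also run in a block's idle phase, and these runtimes bind it to the same limit as `ServiceWeight`. A sketch with unrelated config items elided; the 35% share mirrors what these system parachains use elsewhere and should be treated as illustrative:

    parameter_types! {
        // One bound caps both regular and idle servicing.
        pub MessageQueueServiceWeight: Weight =
            Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block;
    }

    impl pallet_message_queue::Config for Runtime {
        // ... other associated types as in the hunks above ...
        type ServiceWeight = MessageQueueServiceWeight;
        type IdleMaxServiceWeight = MessageQueueServiceWeight;
    }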
+ Weight::from_parts(20_410_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 19_525_000 picoseconds. + Weight::from_parts(20_071_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 90_938_000 picoseconds. - Weight::from_parts(92_822_000, 0) + // Minimum execution time: 91_793_000 picoseconds. + Weight::from_parts(93_761_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 90_133_000 picoseconds. - Weight::from_parts(92_308_000, 0) + // Minimum execution time: 91_819_000 picoseconds. + Weight::from_parts(93_198_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -142,14 +164,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_205_000 picoseconds. - Weight::from_parts(6_595_000, 0) + // Minimum execution time: 6_183_000 picoseconds. 
+ Weight::from_parts(6_598_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +191,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_927_000 picoseconds. - Weight::from_parts(2_062_000, 0) + // Minimum execution time: 1_987_000 picoseconds. + Weight::from_parts(2_076_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +218,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_078_000 picoseconds. - Weight::from_parts(25_782_000, 0) + // Minimum execution time: 25_375_000 picoseconds. + Weight::from_parts(26_165_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +244,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 28_188_000 picoseconds. - Weight::from_parts(28_826_000, 0) + // Minimum execution time: 28_167_000 picoseconds. + Weight::from_parts(28_792_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,8 +256,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_886_000 picoseconds. - Weight::from_parts(1_991_000, 0) + // Minimum execution time: 2_039_000 picoseconds. + Weight::from_parts(2_211_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -235,8 +267,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 17_443_000 picoseconds. - Weight::from_parts(17_964_000, 0) + // Minimum execution time: 17_127_000 picoseconds. + Weight::from_parts(17_519_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -247,8 +279,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 17_357_000 picoseconds. - Weight::from_parts(18_006_000, 0) + // Minimum execution time: 16_701_000 picoseconds. + Weight::from_parts(17_250_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -259,8 +291,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_838_000 picoseconds. - Weight::from_parts(19_688_000, 0) + // Minimum execution time: 18_795_000 picoseconds. + Weight::from_parts(19_302_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -282,8 +314,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 25_517_000 picoseconds. - Weight::from_parts(26_131_000, 0) + // Minimum execution time: 25_007_000 picoseconds. 
+ Weight::from_parts(25_786_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -294,8 +326,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_587_000 picoseconds. - Weight::from_parts(11_963_000, 0) + // Minimum execution time: 11_534_000 picoseconds. + Weight::from_parts(11_798_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -305,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_490_000 picoseconds. - Weight::from_parts(18_160_000, 0) + // Minimum execution time: 17_357_000 picoseconds. + Weight::from_parts(17_629_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +361,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 34_088_000 picoseconds. - Weight::from_parts(34_598_000, 0) + // Minimum execution time: 33_487_000 picoseconds. + Weight::from_parts(34_033_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +375,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_566_000 picoseconds. - Weight::from_parts(3_754_000, 0) + // Minimum execution time: 3_688_000 picoseconds. + Weight::from_parts(3_854_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +387,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_078_000 picoseconds. - Weight::from_parts(25_477_000, 0) + // Minimum execution time: 26_336_000 picoseconds. + Weight::from_parts(26_873_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +399,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_661_000 picoseconds. - Weight::from_parts(35_411_000, 0) + // Minimum execution time: 34_633_000 picoseconds. 
+ Weight::from_parts(35_171_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 235b7f146c8e0f141999ef5a27fe1a21cde7b76b..988b10e1e2d8fac610c2feaec41d018012ec9fc3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -78,6 +78,7 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index a4dcd19dc9e8675599eaad9c2d340eca5874e63b..2ab6ee7995f25fcb2475816e2eb83909ea3aa35d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 4224b3971398ed87c56aa90fe8f82d9f07e0e4bd..22821170a54c9ef0faf6fb3756b6d62234bf654a 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -77,6 +77,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", 
default-features = false } pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -172,6 +174,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index d3f588bf25ff21768f700a8b1c97007678bee17d..170b0a3960006cd2fbfbe0fb391ae4731a113b6c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -423,6 +423,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} @@ -712,6 +713,7 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index 5d427d850046ff030c6c5b6247426849227e7ea1..0edd5dfff2b8b714c6de0a34dcd095787673d39b 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,30 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_813_000 picoseconds. - Weight::from_parts(22_332_000, 0) + // Minimum execution time: 21_911_000 picoseconds. 
+ Weight::from_parts(22_431_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 22_143_000 picoseconds. + Weight::from_parts(22_843_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 93_243_000 picoseconds. - Weight::from_parts(95_650_000, 0) + // Minimum execution time: 96_273_000 picoseconds. + Weight::from_parts(98_351_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 96_199_000 picoseconds. - Weight::from_parts(98_620_000, 0) + // Minimum execution time: 95_571_000 picoseconds. + Weight::from_parts(96_251_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -142,14 +164,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_442_000 picoseconds. - Weight::from_parts(6_682_000, 0) + // Minimum execution time: 6_227_000 picoseconds. 
+ Weight::from_parts(6_419_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +191,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_833_000 picoseconds. - Weight::from_parts(1_973_000, 0) + // Minimum execution time: 1_851_000 picoseconds. + Weight::from_parts(1_940_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +218,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_318_000 picoseconds. - Weight::from_parts(28_224_000, 0) + // Minimum execution time: 27_449_000 picoseconds. + Weight::from_parts(28_513_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +244,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_070_000 picoseconds. - Weight::from_parts(30_205_000, 0) + // Minimum execution time: 29_477_000 picoseconds. + Weight::from_parts(30_251_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,8 +256,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_904_000 picoseconds. - Weight::from_parts(2_033_000, 0) + // Minimum execution time: 1_894_000 picoseconds. + Weight::from_parts(2_009_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -235,8 +267,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 18_348_000 picoseconds. - Weight::from_parts(18_853_000, 0) + // Minimum execution time: 17_991_000 picoseconds. + Weight::from_parts(18_651_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -247,8 +279,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 17_964_000 picoseconds. - Weight::from_parts(18_548_000, 0) + // Minimum execution time: 18_321_000 picoseconds. + Weight::from_parts(18_701_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -259,8 +291,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 19_708_000 picoseconds. - Weight::from_parts(20_157_000, 0) + // Minimum execution time: 19_762_000 picoseconds. + Weight::from_parts(20_529_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -282,8 +314,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_632_000 picoseconds. - Weight::from_parts(27_314_000, 0) + // Minimum execution time: 26_927_000 picoseconds. 
+ Weight::from_parts(27_629_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -294,8 +326,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_929_000 picoseconds. - Weight::from_parts(12_304_000, 0) + // Minimum execution time: 11_957_000 picoseconds. + Weight::from_parts(12_119_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -305,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 18_599_000 picoseconds. - Weight::from_parts(19_195_000, 0) + // Minimum execution time: 17_942_000 picoseconds. + Weight::from_parts(18_878_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +361,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 35_524_000 picoseconds. - Weight::from_parts(36_272_000, 0) + // Minimum execution time: 35_640_000 picoseconds. + Weight::from_parts(36_340_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -344,7 +376,7 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Measured: `103` // Estimated: `1588` // Minimum execution time: 4_044_000 picoseconds. - Weight::from_parts(4_238_000, 0) + Weight::from_parts(4_229_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +387,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_741_000 picoseconds. - Weight::from_parts(26_301_000, 0) + // Minimum execution time: 26_262_000 picoseconds. + Weight::from_parts(26_842_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +399,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 35_925_000 picoseconds. - Weight::from_parts(36_978_000, 0) + // Minimum execution time: 36_775_000 picoseconds. 
+ Weight::from_parts(37_265_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index e4ac2016a726c4ba9ad63af2ddd332cc6dab415f..74c5b5f8115958d0894af621e6c7307e3f67b9b7 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -19,7 +19,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } @@ -74,6 +74,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -90,6 +92,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index e1586c7d9b29e1b72eab189f68db034d8414e264..936d87a23caa03e124d1a4fb1e3c3f86849d55cf 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -90,6 +90,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = @@ -318,6 +319,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index eb92afc431147ef4fa7b64164c0bc5309a465bb7..ee9f5e87ec876d73d80eb088806f5b36f12079db 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -72,6 +72,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -88,6 +89,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 86eb5cdfcaf5ebf0a2cbbed87c2642eed6452405..27965aa204f98579fdc0735c0ae278b4e7a0852a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -99,6 +99,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -301,6 +302,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs index 2d30ddc612cb9544291b90ea9456e392ab3451d4..89b1c4c86632ff0d19ee4fe1428978303ef92082 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -16,26 +16,24 @@ //! Autogenerated weights for `pallet_broker` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_broker -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_broker +// --chain=coretime-rococo-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ @@ -56,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_462_000 picoseconds. - Weight::from_parts(2_552_000, 0) + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_092_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 25_494_000 picoseconds. - Weight::from_parts(26_063_000, 0) + // Minimum execution time: 21_943_000 picoseconds. + Weight::from_parts(22_570_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -79,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 22_299_000 picoseconds. - Weight::from_parts(22_911_000, 0) + // Minimum execution time: 20_923_000 picoseconds. + Weight::from_parts(21_354_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -95,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `1951` - // Minimum execution time: 11_590_000 picoseconds. - Weight::from_parts(12_007_000, 0) + // Minimum execution time: 10_687_000 picoseconds. + Weight::from_parts(11_409_000, 0) .saturating_add(Weight::from_parts(0, 1951)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -124,11 +122,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12567` // Estimated: `14052` - // Minimum execution time: 120_928_000 picoseconds. - Weight::from_parts(124_947_252, 0) + // Minimum execution time: 111_288_000 picoseconds. 
+ Weight::from_parts(117_804_282, 0) .saturating_add(Weight::from_parts(0, 14052)) - // Standard Error: 435 - .saturating_add(Weight::from_parts(1_246, 0).saturating_mul(n.into())) + // Standard Error: 391 + .saturating_add(Weight::from_parts(1_243, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(66)) } @@ -144,8 +142,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `316` // Estimated: `3593` - // Minimum execution time: 32_826_000 picoseconds. - Weight::from_parts(33_889_000, 0) + // Minimum execution time: 33_006_000 picoseconds. + Weight::from_parts(34_256_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -166,8 +164,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `434` // Estimated: `4698` - // Minimum execution time: 57_362_000 picoseconds. - Weight::from_parts(58_994_000, 0) + // Minimum execution time: 61_473_000 picoseconds. + Weight::from_parts(66_476_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -178,8 +176,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 13_982_000 picoseconds. - Weight::from_parts(14_447_000, 0) + // Minimum execution time: 13_771_000 picoseconds. + Weight::from_parts(14_374_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -190,8 +188,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 15_070_000 picoseconds. - Weight::from_parts(15_735_000, 0) + // Minimum execution time: 15_162_000 picoseconds. + Weight::from_parts(15_742_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +200,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 16_527_000 picoseconds. - Weight::from_parts(16_894_000, 0) + // Minimum execution time: 16_196_000 picoseconds. + Weight::from_parts(16_796_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -220,8 +218,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `936` // Estimated: `4681` - // Minimum execution time: 25_493_000 picoseconds. - Weight::from_parts(26_091_000, 0) + // Minimum execution time: 25_653_000 picoseconds. + Weight::from_parts(27_006_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +238,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `5996` - // Minimum execution time: 31_498_000 picoseconds. - Weight::from_parts(32_560_000, 0) + // Minimum execution time: 31_114_000 picoseconds. 
+ Weight::from_parts(32_235_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -257,11 +255,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 57_183_000 picoseconds. - Weight::from_parts(58_024_898, 0) + // Minimum execution time: 57_280_000 picoseconds. + Weight::from_parts(58_127_480, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 35_831 - .saturating_add(Weight::from_parts(1_384_446, 0).saturating_mul(m.into())) + // Standard Error: 41_670 + .saturating_add(Weight::from_parts(1_203_066, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -283,8 +281,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `3680` - // Minimum execution time: 59_762_000 picoseconds. - Weight::from_parts(61_114_000, 0) + // Minimum execution time: 59_968_000 picoseconds. + Weight::from_parts(62_315_000, 0) .saturating_add(Weight::from_parts(0, 3680)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -297,8 +295,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `465` // Estimated: `3550` - // Minimum execution time: 41_473_000 picoseconds. - Weight::from_parts(44_155_000, 0) + // Minimum execution time: 50_887_000 picoseconds. + Weight::from_parts(57_366_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -313,8 +311,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 56_672_000 picoseconds. - Weight::from_parts(58_086_000, 0) + // Minimum execution time: 84_472_000 picoseconds. + Weight::from_parts(96_536_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -331,8 +329,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 64_460_000 picoseconds. - Weight::from_parts(65_894_000, 0) + // Minimum execution time: 96_371_000 picoseconds. + Weight::from_parts(104_659_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -345,8 +343,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `957` // Estimated: `4698` - // Minimum execution time: 37_447_000 picoseconds. - Weight::from_parts(42_318_000, 0) + // Minimum execution time: 51_741_000 picoseconds. + Weight::from_parts(54_461_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -366,8 +364,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 21_219_000 picoseconds. - Weight::from_parts(22_084_648, 0) + // Minimum execution time: 19_901_000 picoseconds. 
+ Weight::from_parts(21_028_116, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -379,11 +377,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_792_000 picoseconds. - Weight::from_parts(6_358_588, 0) + // Minimum execution time: 5_987_000 picoseconds. + Weight::from_parts(6_412_478, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 20 - .saturating_add(Weight::from_parts(26, 0).saturating_mul(n.into())) + // Standard Error: 16 + .saturating_add(Weight::from_parts(47, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -397,8 +395,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `447` // Estimated: `6196` - // Minimum execution time: 38_690_000 picoseconds. - Weight::from_parts(39_706_000, 0) + // Minimum execution time: 38_623_000 picoseconds. + Weight::from_parts(39_773_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -414,15 +412,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:60) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(n: u32, ) -> Weight { + fn rotate_sale(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12514` // Estimated: `13506` - // Minimum execution time: 93_531_000 picoseconds. - Weight::from_parts(95_836_318, 0) + // Minimum execution time: 97_074_000 picoseconds. + Weight::from_parts(101_247_740, 0) .saturating_add(Weight::from_parts(0, 13506)) - // Standard Error: 113 - .saturating_add(Weight::from_parts(329, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(65)) } @@ -434,8 +430,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 6_506_000 picoseconds. - Weight::from_parts(6_783_000, 0) + // Minimum execution time: 6_317_000 picoseconds. + Weight::from_parts(6_521_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -458,8 +454,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 31_927_000 picoseconds. - Weight::from_parts(32_748_000, 0) + // Minimum execution time: 32_575_000 picoseconds. + Weight::from_parts(33_299_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -478,8 +474,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 15_682_000 picoseconds. - Weight::from_parts(16_012_000, 0) + // Minimum execution time: 15_256_000 picoseconds. 
+ Weight::from_parts(15_927_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -490,8 +486,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_147_000 picoseconds. - Weight::from_parts(2_281_000, 0) + // Minimum execution time: 1_783_000 picoseconds. + Weight::from_parts(1_904_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -509,10 +505,22 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3863` - // Minimum execution time: 12_015_000 picoseconds. - Weight::from_parts(12_619_000, 0) + // Minimum execution time: 12_307_000 picoseconds. + Weight::from_parts(12_967_000, 0) .saturating_add(Weight::from_parts(0, 3863)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `470` + // Estimated: `1886` + // Minimum execution time: 6_597_000 picoseconds. + Weight::from_parts(6_969_000, 0) + .saturating_add(Weight::from_parts(0, 1886)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index c5d315467c1ed8b2aabf7ac18abe10931a02951b..df0044089c8f6fb558d368e2f006a6a4fbd9fb97 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 35_051_000 picoseconds. - Weight::from_parts(35_200_000, 0) + // Minimum execution time: 18_767_000 picoseconds. 
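The `swap_leases` rows added above give `pallet_broker`'s new lease-swapping extrinsic a measured weight. As a hedged, self-contained sketch of the trait entry these generated files implement (the struct name and the omitted `DbWeight` terms are ours; the constants mirror the coretime-rococo row above), the dispatchable would then reference it via something like `#[pallet::weight(T::WeightInfo::swap_leases())]`:

use frame_support::weights::Weight;

// The shape of the `WeightInfo` entry the autogenerated impl above provides.
pub trait WeightInfo {
    fn swap_leases() -> Weight;
}

pub struct Illustrative;
impl WeightInfo for Illustrative {
    fn swap_leases() -> Weight {
        // ref-time part, then the proof-size part, as in the generated code.
        Weight::from_parts(6_969_000, 0)
            .saturating_add(Weight::from_parts(0, 1886))
    }
}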
+ Weight::from_parts(19_420_000, 0) + .saturating_add(Weight::from_parts(0, 3539)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `74` + // Estimated: `3539` + // Minimum execution time: 19_184_000 picoseconds. + Weight::from_parts(19_695_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 56_235_000 picoseconds. - Weight::from_parts(58_178_000, 0) + // Minimum execution time: 58_120_000 picoseconds. + Weight::from_parts(59_533_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_226_000 picoseconds. - Weight::from_parts(6_403_000, 0) + // Minimum execution time: 6_074_000 picoseconds. + Weight::from_parts(6_398_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_020_000 picoseconds. - Weight::from_parts(2_100_000, 0) + // Minimum execution time: 2_036_000 picoseconds. + Weight::from_parts(2_180_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_387_000 picoseconds. 
- Weight::from_parts(24_814_000, 0) + // Minimum execution time: 25_014_000 picoseconds. + Weight::from_parts(25_374_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 27_039_000 picoseconds. - Weight::from_parts(27_693_000, 0) + // Minimum execution time: 27_616_000 picoseconds. + Weight::from_parts(28_499_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_920_000 picoseconds. - Weight::from_parts(2_082_000, 0) + // Minimum execution time: 2_061_000 picoseconds. + Weight::from_parts(2_153_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 17_141_000 picoseconds. - Weight::from_parts(17_500_000, 0) + // Minimum execution time: 16_592_000 picoseconds. + Weight::from_parts(16_900_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 17_074_000 picoseconds. - Weight::from_parts(17_431_000, 0) + // Minimum execution time: 16_694_000 picoseconds. + Weight::from_parts(16_905_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 19_139_000 picoseconds. - Weight::from_parts(19_474_000, 0) + // Minimum execution time: 17_779_000 picoseconds. + Weight::from_parts(18_490_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 24_346_000 picoseconds. - Weight::from_parts(25_318_000, 0) + // Minimum execution time: 24_526_000 picoseconds. + Weight::from_parts(25_182_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_777_000 picoseconds. - Weight::from_parts(12_051_000, 0) + // Minimum execution time: 10_467_000 picoseconds. + Weight::from_parts(10_934_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_538_000 picoseconds. 
- Weight::from_parts(17_832_000, 0) + // Minimum execution time: 16_377_000 picoseconds. + Weight::from_parts(17_114_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 33_623_000 picoseconds. - Weight::from_parts(34_186_000, 0) + // Minimum execution time: 32_575_000 picoseconds. + Weight::from_parts(33_483_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_363_000 picoseconds. - Weight::from_parts(3_511_000, 0) + // Minimum execution time: 3_604_000 picoseconds. + Weight::from_parts(3_744_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_969_000 picoseconds. - Weight::from_parts(24_347_000, 0) + // Minimum execution time: 23_983_000 picoseconds. + Weight::from_parts(24_404_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_071_000 picoseconds. - Weight::from_parts(35_031_000, 0) + // Minimum execution time: 34_446_000 picoseconds. 
+ Weight::from_parts(35_465_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index b8efecffc5075be04fb3a52410ae86410a01e48d..60cc7e2f765477052332b4c422facb8b19e1f5a4 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -71,6 +71,8 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } + pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -87,6 +89,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index c31e474cc2f1718b5de4a8ebb88a19072deba1ba..8075ffb4f1c2200f520131c9eb9d3dc4861db55b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -99,6 +99,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -301,6 +302,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs index 8727b9633b1f0eaca2bcaa5f7a43d832f6abbe9b..13d5fcf3898bcc07fce13c7ee2deeb8c2b9fa76f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs @@ -17,25 +17,23 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_broker -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_broker +// --chain=coretime-westend-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ @@ -56,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_944_000 picoseconds. - Weight::from_parts(2_045_000, 0) + // Minimum execution time: 1_897_000 picoseconds. + Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -67,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 21_158_000 picoseconds. - Weight::from_parts(21_572_000, 0) + // Minimum execution time: 22_550_000 picoseconds. + Weight::from_parts(22_871_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -79,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 20_497_000 picoseconds. - Weight::from_parts(20_995_000, 0) + // Minimum execution time: 21_170_000 picoseconds. + Weight::from_parts(21_645_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -95,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `1631` - // Minimum execution time: 10_280_000 picoseconds. - Weight::from_parts(10_686_000, 0) + // Minimum execution time: 10_494_000 picoseconds. + Weight::from_parts(10_942_000, 0) .saturating_add(Weight::from_parts(0, 1631)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -120,15 +118,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12247` // Estimated: `13732` - // Minimum execution time: 61_020_000 picoseconds. - Weight::from_parts(63_240_622, 0) + // Minimum execution time: 61_014_000 picoseconds. 
+ Weight::from_parts(63_267_651, 0) .saturating_add(Weight::from_parts(0, 13732)) - // Standard Error: 102 - .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(26)) } @@ -144,8 +140,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `316` // Estimated: `3593` - // Minimum execution time: 30_627_000 picoseconds. - Weight::from_parts(31_648_000, 0) + // Minimum execution time: 30_931_000 picoseconds. + Weight::from_parts(31_941_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -166,8 +162,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `434` // Estimated: `4698` - // Minimum execution time: 57_701_000 picoseconds. - Weight::from_parts(59_825_000, 0) + // Minimum execution time: 57_466_000 picoseconds. + Weight::from_parts(65_042_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -178,8 +174,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 12_898_000 picoseconds. - Weight::from_parts(13_506_000, 0) + // Minimum execution time: 12_799_000 picoseconds. + Weight::from_parts(13_401_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -190,8 +186,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 14_284_000 picoseconds. - Weight::from_parts(14_791_000, 0) + // Minimum execution time: 14_107_000 picoseconds. + Weight::from_parts(14_630_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +198,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `357` // Estimated: `3550` - // Minimum execution time: 15_570_000 picoseconds. - Weight::from_parts(16_158_000, 0) + // Minimum execution time: 15_254_000 picoseconds. + Weight::from_parts(16_062_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -220,8 +216,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `735` // Estimated: `4681` - // Minimum execution time: 23_329_000 picoseconds. - Weight::from_parts(24_196_000, 0) + // Minimum execution time: 23_557_000 picoseconds. + Weight::from_parts(24_382_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +236,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `801` // Estimated: `5996` - // Minimum execution time: 29_288_000 picoseconds. - Weight::from_parts(30_066_000, 0) + // Minimum execution time: 29_371_000 picoseconds. 
+ Weight::from_parts(30_200_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -257,11 +253,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `652` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 54_833_000 picoseconds. - Weight::from_parts(55_577_423, 0) + // Minimum execution time: 54_331_000 picoseconds. + Weight::from_parts(55_322_165, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 35_105 - .saturating_add(Weight::from_parts(1_267_911, 0).saturating_mul(m.into())) + // Standard Error: 35_225 + .saturating_add(Weight::from_parts(1_099_614, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -283,8 +279,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `3680` - // Minimum execution time: 55_289_000 picoseconds. - Weight::from_parts(56_552_000, 0) + // Minimum execution time: 53_789_000 picoseconds. + Weight::from_parts(55_439_000, 0) .saturating_add(Weight::from_parts(0, 3680)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -297,8 +293,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `465` // Estimated: `3550` - // Minimum execution time: 39_736_000 picoseconds. - Weight::from_parts(41_346_000, 0) + // Minimum execution time: 43_941_000 picoseconds. + Weight::from_parts(49_776_000, 0) .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -313,8 +309,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 57_319_000 picoseconds. - Weight::from_parts(60_204_000, 0) + // Minimum execution time: 64_917_000 picoseconds. + Weight::from_parts(70_403_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -331,8 +327,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `857` // Estimated: `3593` - // Minimum execution time: 85_216_000 picoseconds. - Weight::from_parts(91_144_000, 0) + // Minimum execution time: 72_633_000 picoseconds. + Weight::from_parts(79_305_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -345,8 +341,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `556` // Estimated: `4698` - // Minimum execution time: 32_331_000 picoseconds. - Weight::from_parts(39_877_000, 0) + // Minimum execution time: 36_643_000 picoseconds. + Weight::from_parts(48_218_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -362,28 +358,28 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. 
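Where a function carries a component such as `n`, the generated code is a linear fit: a regression intercept plus a per-unit slope, with the quoted `Standard Error` describing the slope's uncertainty; dropping an unused component (the `n` to `_n` renames in these hunks) simply removes the slope term. A self-contained illustration of evaluating such a formula, using the intercept and slope from `process_core_count` just below (the free function itself is ours):

use frame_support::weights::Weight;

// Intercept plus per-`n` slope: the exact shape the benchmark CLI emits.
fn process_core_count_weight(n: u32) -> Weight {
    Weight::from_parts(5_887_598, 0)
        .saturating_add(Weight::from_parts(41, 0).saturating_mul(n.into()))
}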
- fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_128_000 picoseconds. - Weight::from_parts(19_061_234, 0) + // Minimum execution time: 17_617_000 picoseconds. + Weight::from_parts(18_904_788, 0) .saturating_add(Weight::from_parts(0, 3539)) - // Standard Error: 48 - .saturating_add(Weight::from_parts(141, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(_n: u32, ) -> Weight { + fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_368_000 picoseconds. - Weight::from_parts(5_837_005, 0) + // Minimum execution time: 5_575_000 picoseconds. + Weight::from_parts(5_887_598, 0) .saturating_add(Weight::from_parts(0, 1487)) + // Standard Error: 16 + .saturating_add(Weight::from_parts(41, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -397,8 +393,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `447` // Estimated: `6196` - // Minimum execution time: 36_047_000 picoseconds. - Weight::from_parts(37_101_000, 0) + // Minimum execution time: 36_415_000 picoseconds. + Weight::from_parts(37_588_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -414,13 +410,15 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `Broker::Workplan` (r:0 w:20) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { + fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `12194` // Estimated: `13506` - // Minimum execution time: 48_158_000 picoseconds. - Weight::from_parts(49_891_920, 0) + // Minimum execution time: 48_362_000 picoseconds. + Weight::from_parts(49_616_106, 0) .saturating_add(Weight::from_parts(0, 13506)) + // Standard Error: 61 + .saturating_add(Weight::from_parts(59, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(25)) } @@ -432,8 +430,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 5_911_000 picoseconds. - Weight::from_parts(6_173_000, 0) + // Minimum execution time: 6_148_000 picoseconds. + Weight::from_parts(6_374_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -456,8 +454,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 30_140_000 picoseconds. - Weight::from_parts(30_912_000, 0) + // Minimum execution time: 30_267_000 picoseconds. 
+ Weight::from_parts(30_825_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -476,8 +474,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 13_684_000 picoseconds. - Weight::from_parts(14_252_000, 0) + // Minimum execution time: 13_491_000 picoseconds. + Weight::from_parts(13_949_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -488,8 +486,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_718_000 picoseconds. - Weight::from_parts(1_843_000, 0) + // Minimum execution time: 1_711_000 picoseconds. + Weight::from_parts(1_913_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -507,10 +505,22 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3863` - // Minimum execution time: 11_771_000 picoseconds. - Weight::from_parts(12_120_000, 0) + // Minimum execution time: 12_035_000 picoseconds. + Weight::from_parts(12_383_000, 0) .saturating_add(Weight::from_parts(0, 3863)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `150` + // Estimated: `1566` + // Minimum execution time: 6_142_000 picoseconds. + Weight::from_parts(6_538_000, 0) + .saturating_add(Weight::from_parts(0, 1566)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index 0082db3099d029c976779af8600bcaf4410e8a2f..a1701c5f1c2ced1bbdcac49863cc05ceb28a019f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_410_000 picoseconds. - Weight::from_parts(18_657_000, 0) + // Minimum execution time: 17_681_000 picoseconds. 
+ Weight::from_parts(18_350_000, 0) + .saturating_add(Weight::from_parts(0, 3539)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `74` + // Estimated: `3539` + // Minimum execution time: 18_091_000 picoseconds. + Weight::from_parts(18_327_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 56_616_000 picoseconds. - Weight::from_parts(57_751_000, 0) + // Minimum execution time: 54_943_000 picoseconds. + Weight::from_parts(56_519_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_014_000 picoseconds. - Weight::from_parts(6_412_000, 0) + // Minimum execution time: 5_887_000 picoseconds. + Weight::from_parts(6_101_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_844_000 picoseconds. - Weight::from_parts(1_957_000, 0) + // Minimum execution time: 1_940_000 picoseconds. + Weight::from_parts(2_022_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_067_000 picoseconds. 
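The `execute_blob` entries marked `Benchmark::Override` above intentionally report 18_446_744_073_709_551_000 picoseconds, a sentinel just under `u64::MAX`, instead of a measured value, so an override that was never replaced with a real benchmark cannot slip into fee calculations unnoticed. A hedged sanity check one could write against such placeholders (ours, not part of the runtime):

use frame_support::weights::Weight;

const OVERRIDE_SENTINEL: u64 = 18_446_744_073_709_551_000;

// Fails loudly if a dispatchable still carries the benchmark-override placeholder.
fn assert_not_override(w: Weight) {
    assert!(w.ref_time() < OVERRIDE_SENTINEL, "weight is a Benchmark::Override placeholder");
}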
- Weight::from_parts(24_553_000, 0) + // Minimum execution time: 23_165_000 picoseconds. + Weight::from_parts(23_800_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 27_023_000 picoseconds. - Weight::from_parts(27_620_000, 0) + // Minimum execution time: 26_506_000 picoseconds. + Weight::from_parts(27_180_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_866_000 picoseconds. - Weight::from_parts(1_984_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(2_002_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_425_000 picoseconds. - Weight::from_parts(16_680_000, 0) + // Minimum execution time: 16_138_000 picoseconds. + Weight::from_parts(16_447_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_171_000 picoseconds. - Weight::from_parts(16_564_000, 0) + // Minimum execution time: 16_099_000 picoseconds. + Weight::from_parts(16_592_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_785_000 picoseconds. - Weight::from_parts(18_123_000, 0) + // Minimum execution time: 17_972_000 picoseconds. + Weight::from_parts(18_379_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 23_903_000 picoseconds. - Weight::from_parts(24_769_000, 0) + // Minimum execution time: 23_554_000 picoseconds. + Weight::from_parts(24_446_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_617_000 picoseconds. - Weight::from_parts(10_843_000, 0) + // Minimum execution time: 10_541_000 picoseconds. + Weight::from_parts(10_894_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_656_000 picoseconds. 
- Weight::from_parts(17_106_000, 0) + // Minimum execution time: 16_404_000 picoseconds. + Weight::from_parts(16_818_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 31_721_000 picoseconds. - Weight::from_parts(32_547_000, 0) + // Minimum execution time: 31_617_000 picoseconds. + Weight::from_parts(32_336_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_439_000 picoseconds. - Weight::from_parts(3_619_000, 0) + // Minimum execution time: 3_328_000 picoseconds. + Weight::from_parts(3_501_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_657_000 picoseconds. - Weight::from_parts(24_971_000, 0) + // Minimum execution time: 23_571_000 picoseconds. + Weight::from_parts(24_312_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_028_000 picoseconds. - Weight::from_parts(34_697_000, 0) + // Minimum execution time: 32_879_000 picoseconds. 
+ Weight::from_parts(33_385_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index a357bf519e40fcb7b14ae0b2c4e92b0eb6fbb94f..fe9cd25841bfc8c0b14278ed6ea167a46c3dd652 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index cee17cdc7b05de2b9f0736248583cb984547f5f9..ca1a915ba740ac6d9243fe60fe4db4dbe7e9857d 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -212,6 +212,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index eebd662c3fd5728f18b4dfc620d2b8f4c707f130..7183be5fc82cc594c6ad2f3738dbfd5b67738a2c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -69,6 +69,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -85,6 +86,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "enumflags2/std", "frame-benchmarking?/std", diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index ad369583f07fe96660713ce9d90953895d2394eb..1b6499f5d6178ef16fdf1cb024cfd638e1331e45 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -93,6 +93,7 @@ pub type SignedExtra = ( 
frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -282,6 +283,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; type WeightInfo = weights::pallet_message_queue::WeightInfo; } @@ -456,7 +458,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] ); @@ -644,7 +646,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -680,7 +682,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< xcm_config::XcmConfig, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index fabce29b5fd9451f27364c38481d2dac98c430d8..ac494fdc719f4139a79b9f92525bde8322267af2 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_830_000 picoseconds. - Weight::from_parts(18_411_000, 0) + // Minimum execution time: 17_935_000 picoseconds. 
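Editor's note on the `StorageWeightReclaim` signed extension that this diff wires into `SignedExtra` above: conceptually, it measures the proof size a dispatch actually consumed (via the proof-size host function) and hands any over-estimate back to the block's weight meter. A minimal sketch of that refund arithmetic, assuming measurement points before and after dispatch as described — illustrative only, not the extension's real code:

```rust
/// Illustrative refund arithmetic behind proof-size weight reclaim.
/// `benchmarked` is the proof-size component charged up front; the two
/// `measured_*` values bracket the dispatch.
fn reclaimable_proof_size(benchmarked: u64, measured_before: u64, measured_after: u64) -> u64 {
    let consumed = measured_after.saturating_sub(measured_before);
    // An over-estimate is reclaimable; an under-estimate reclaims nothing.
    benchmarked.saturating_sub(consumed)
}
```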
+ Weight::from_parts(18_482_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 18_311_000 picoseconds. + Weight::from_parts(18_850_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 55_456_000 picoseconds. - Weight::from_parts(56_808_000, 0) + // Minimum execution time: 56_182_000 picoseconds. + Weight::from_parts(58_136_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_996_000 picoseconds. - Weight::from_parts(6_154_000, 0) + // Minimum execution time: 5_979_000 picoseconds. + Weight::from_parts(6_289_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_768_000 picoseconds. - Weight::from_parts(1_914_000, 0) + // Minimum execution time: 1_853_000 picoseconds. + Weight::from_parts(2_045_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_120_000 picoseconds. 
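As a sanity check on how these autogenerated entries compose, the new `send_blob` figure above can be reproduced by hand with `frame_support`'s weight API (a sketch mirroring what the generated file does inline):

```rust
use frame_support::weights::{RuntimeDbWeight, Weight};

/// Recompute the `send_blob` weight above: 18_850_000 ps of ref time, a
/// 3_503-byte proof-size bound, plus 5 database reads and 2 writes.
fn send_blob_weight(db: RuntimeDbWeight) -> Weight {
    Weight::from_parts(18_850_000, 0)
        .saturating_add(Weight::from_parts(0, 3_503))
        .saturating_add(db.reads(5))
        .saturating_add(db.writes(2))
}
```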
- Weight::from_parts(24_745_000, 0) + // Minimum execution time: 23_827_000 picoseconds. + Weight::from_parts(24_493_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_630_000 picoseconds. - Weight::from_parts(27_289_000, 0) + // Minimum execution time: 26_755_000 picoseconds. + Weight::from_parts(27_125_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_821_000 picoseconds. - Weight::from_parts(1_946_000, 0) + // Minimum execution time: 1_898_000 picoseconds. + Weight::from_parts(2_028_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_586_000 picoseconds. - Weight::from_parts(16_977_000, 0) + // Minimum execution time: 16_300_000 picoseconds. + Weight::from_parts(16_995_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_923_000 picoseconds. - Weight::from_parts(17_415_000, 0) + // Minimum execution time: 16_495_000 picoseconds. + Weight::from_parts(16_950_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_596_000 picoseconds. - Weight::from_parts(18_823_000, 0) + // Minimum execution time: 18_153_000 picoseconds. + Weight::from_parts(18_595_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_817_000 picoseconds. - Weight::from_parts(24_520_000, 0) + // Minimum execution time: 23_387_000 picoseconds. + Weight::from_parts(24_677_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_042_000 picoseconds. - Weight::from_parts(11_578_000, 0) + // Minimum execution time: 10_939_000 picoseconds. + Weight::from_parts(11_210_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_306_000 picoseconds. 
- Weight::from_parts(17_817_000, 0) + // Minimum execution time: 16_850_000 picoseconds. + Weight::from_parts(17_195_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 32_141_000 picoseconds. - Weight::from_parts(32_954_000, 0) + // Minimum execution time: 31_931_000 picoseconds. + Weight::from_parts(32_494_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_410_000 picoseconds. - Weight::from_parts(3_556_000, 0) + // Minimum execution time: 3_514_000 picoseconds. + Weight::from_parts(3_709_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_021_000 picoseconds. - Weight::from_parts(25_240_000, 0) + // Minimum execution time: 24_863_000 picoseconds. + Weight::from_parts(25_293_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_801_000 picoseconds. - Weight::from_parts(34_655_000, 0) + // Minimum execution time: 33_799_000 picoseconds. 
+ Weight::from_parts(34_665_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 39cb69e679cc26f2d074626757864cb078d0ded8..576c3b1aa4e3eaafafafd0af353580fef7a3c20c 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -69,6 +69,7 @@ cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-f cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -85,6 +86,7 @@ std = [ "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "enumflags2/std", "frame-benchmarking?/std", diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index c76611ad2a3af67e5a336ab61123f2e32499ebf3..6ae53d641b0c7cd812d405039d59bc93c5749597 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -93,6 +93,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -282,6 +283,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; type WeightInfo = weights::pallet_message_queue::WeightInfo; } @@ -344,7 +346,7 @@ parameter_types! { pub const StakingAdminBodyId: BodyId = BodyId::Defense; } -/// We allow Root and the `StakingAdmi` to execute privileged collator selection operations. +/// We allow Root and the `StakingAdmin` to execute privileged collator selection operations. pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< EnsureRoot, EnsureXcm>, @@ -456,7 +458,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] ); @@ -644,7 +646,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -680,7 +682,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< xcm_config::XcmConfig, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index c337289243b748d721b60421bccf3349044ef194..62a9c802808c0fdc2305f566f7b7f67b4d0a5748 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,8 +62,28 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_856_000 picoseconds. - Weight::from_parts(18_473_000, 0) + // Minimum execution time: 17_450_000 picoseconds. + Weight::from_parts(17_913_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 18_082_000 picoseconds. 
+ Weight::from_parts(18_293_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -84,8 +104,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 56_112_000 picoseconds. - Weight::from_parts(57_287_000, 0) + // Minimum execution time: 54_939_000 picoseconds. + Weight::from_parts(55_721_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,14 +140,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_186_000 picoseconds. - Weight::from_parts(6_420_000, 0) + // Minimum execution time: 5_789_000 picoseconds. + Weight::from_parts(5_995_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +167,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_999_000, 0) + // Minimum execution time: 1_795_000 picoseconds. + Weight::from_parts(1_924_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_833_000 picoseconds. - Weight::from_parts(24_636_000, 0) + // Minimum execution time: 23_445_000 picoseconds. + Weight::from_parts(23_906_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_557_000 picoseconds. - Weight::from_parts(27_275_000, 0) + // Minimum execution time: 26_590_000 picoseconds. + Weight::from_parts(27_056_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_921_000 picoseconds. - Weight::from_parts(2_040_000, 0) + // Minimum execution time: 1_889_000 picoseconds. 
+ Weight::from_parts(1_962_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -209,8 +239,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_832_000 picoseconds. - Weight::from_parts(17_312_000, 0) + // Minimum execution time: 16_408_000 picoseconds. + Weight::from_parts(16_877_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -221,8 +251,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_687_000 picoseconds. - Weight::from_parts(17_123_000, 0) + // Minimum execution time: 16_791_000 picoseconds. + Weight::from_parts(17_111_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -233,8 +263,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_164_000 picoseconds. - Weight::from_parts(18_580_000, 0) + // Minimum execution time: 18_355_000 picoseconds. + Weight::from_parts(19_110_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -254,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_577_000 picoseconds. - Weight::from_parts(24_324_000, 0) + // Minimum execution time: 23_354_000 picoseconds. + Weight::from_parts(23_999_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -266,8 +296,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_014_000 picoseconds. - Weight::from_parts(11_223_000, 0) + // Minimum execution time: 11_065_000 picoseconds. + Weight::from_parts(11_302_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -277,8 +307,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_887_000 picoseconds. - Weight::from_parts(17_361_000, 0) + // Minimum execution time: 16_998_000 picoseconds. + Weight::from_parts(17_509_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -299,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 31_705_000 picoseconds. - Weight::from_parts(32_166_000, 0) + // Minimum execution time: 31_068_000 picoseconds. + Weight::from_parts(31_978_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -313,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_568_000 picoseconds. - Weight::from_parts(3_669_000, 0) + // Minimum execution time: 3_478_000 picoseconds. 
+ Weight::from_parts(3_595_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_823_000 picoseconds. - Weight::from_parts(25_344_000, 0) + // Minimum execution time: 24_962_000 picoseconds. + Weight::from_parts(25_404_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -337,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_516_000 picoseconds. - Weight::from_parts(35_478_000, 0) + // Minimum execution time: 32_685_000 picoseconds. + Weight::from_parts(33_592_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 9f08fdf59437cc03336eee449e042eca90ec549a..eb702c9f2cdf30844fbf4ea17534566c038c4880 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 2f82547afe9a02d4f1b01699869e35aecb526433..f66d04fec1fdd72eb30f183bb713e88d4fee00e7 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 0f4957fd802b441e17fe05f791a7e1dbd047a29e..ad79d6849bd5fc9733ec724a6869f615eb856a54 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -232,6 +232,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index e62daa16a1256fa993a97d82d4f6df96f261018b..3c84243306fbe518dd00ee78411d9ec032e90446 100644 --- 
a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -425,12 +425,13 @@ impl< } // do teleport - >::teleport_assets( + >::limited_teleport_assets( origin, Box::new(dest.into()), Box::new(beneficiary.into()), Box::new((AssetId(asset), amount).into()), 0, + Unlimited, ) } } diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index c18f6571f416beae3b65ffe9b8263696b2ec227c..028aa002a91e508c858c7ba71c981d9c9cf8dc60 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -21,7 +21,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 1e6d485d148d356920c3069fc1017b5f5d703d28..0a55d2dcfe53ee5ba7bf98a223a300f02e97d05f 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -57,7 +57,6 @@ use parachains_common::{ impls::{AssetsToBlockAuthor, NonZeroIssuance}, message_queue::{NarrowOriginToSibling, ParaIdToSibling}, }; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use smallvec::smallvec; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -85,7 +84,7 @@ use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; // XCM Imports use parachains_common::{AccountId, Signature}; -use xcm::latest::prelude::BodyId; +use xcm::latest::prelude::{AssetId as AssetLocationId, BodyId}; /// Balance of an account. pub type Balance = u128; @@ -541,10 +540,25 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; } impl cumulus_pallet_aura_ext::Config for Runtime {} +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetLocationId = AssetLocationId(xcm_config::RelayLocation::get()); + /// The base fee for the message delivery fees (3 CENTS). + pub const BaseDeliveryFee: u128 = (1_000_000_000_000u128 / 100).saturating_mul(3); +} + +pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< + FeeAssetId, + BaseDeliveryFee, + TransactionByteFee, + XcmpQueue, +>; + impl cumulus_pallet_xcmp_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ChannelInfo = ParachainSystem; @@ -555,7 +569,7 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); - type PriceForSiblingDelivery = NoPriceForMessageDelivery; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index d83a877c2f89b27c783bd19f19d75736a6dd0022..7b8e40e04288d213499a35ba085655030abe50d2 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -28,6 +28,7 @@ use super::{ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, }; +use crate::{BaseDeliveryFee, FeeAssetId, TransactionByteFee}; use core::marker::PhantomData; use frame_support::{ parameter_types, @@ -36,10 +37,10 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; -use parachains_common::xcm_config::AssetFeeAsExistentialDepositMultiplier; +use parachains_common::{xcm_config::AssetFeeAsExistentialDepositMultiplier, TREASURY_PALLET_ID}; use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::impls::ToAuthor; -use sp_runtime::traits::ConvertInto; +use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; +use sp_runtime::traits::{AccountIdConversion, ConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, @@ -49,6 +50,7 @@ use xcm_builder::{ SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -59,6 +61,7 @@ parameter_types! { pub const RelayNetwork: Option = None; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); } /// Type for specifying how a `Location` can be converted into an `AccountId`. This is used @@ -142,7 +145,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, - // We dont need to check teleports here. + // We don't need to check teleports here. NoChecking, // The account to use for tracking teleports. CheckingAccount, @@ -331,7 +334,10 @@ impl xcm_executor::Config for XcmConfig { type MaxAssetsIntoHolding = MaxAssetsIntoHolding; type AssetLocker = (); type AssetExchanger = (); - type FeeManager = (); + type FeeManager = XcmFeeManagerFromComponents< + (), + XcmFeeToAccount, + >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; @@ -355,11 +361,14 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = /// No local origins on this chain are allowed to dispatch XCM sends/executions. pub type LocalOriginToLocation = SignedToAccountId32; +pub type PriceForParentDelivery = + ExponentialPrice; + /// The means for routing XCM messages which are not for local execution into the right message /// queues. pub type XcmRouter = WithUniqueTopic<( // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, + cumulus_primitives_utility::ParentAsUmp, // ..and XCMP to communicate with the sibling chains. 
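Editor's note on the delivery-fee changes above: penpal now prices both parent (UMP) and sibling (XCMP) delivery with `ExponentialPrice`, parameterized by `FeeAssetId`, `BaseDeliveryFee` (3 CENTS in the diff: `(1_000_000_000_000 / 100) * 3`), `TransactionByteFee`, and the transport whose congestion scales the price. A simplified standalone sketch of that pricing shape — not the actual `polkadot_runtime_common` implementation, and the percentage encoding of the congestion factor is an assumption for illustration:

```rust
/// Simplified delivery-fee sketch: a flat base fee plus a per-byte fee, both
/// scaled by a congestion factor reported by the transport queue.
fn delivery_fee(base_fee: u128, byte_fee: u128, msg_len: u128, congestion_pct: u128) -> u128 {
    base_fee
        .saturating_add(byte_fee.saturating_mul(msg_len))
        .saturating_mul(congestion_pct)
        / 100
}
```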
XcmpQueue, )>; diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 790f38d94f502aba2b008927df602c25aa51d151..df3aaa92c79e78e5dfb9043db69778db051b3053 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index c6006141981766320713ec4f7b59af3ee049bc74..034d16267d450a03a395b6b9f7401231c5cfea69 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -320,6 +320,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } impl cumulus_pallet_aura_ext::Config for Runtime {} diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 37b7be75ef91de38dc9ed47e27e7ebd25ac21e54..280ece30fb6834c2a5031f33cd316b5a137f65f5 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -15,7 +15,7 @@ name = "polkadot-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.28" diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 9ba7b7876b3ee06f0d61c626d4fa3c234e313ff9..ac9c6b6f978aca7e021944b35870700cbb88a88e 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -135,7 +135,7 @@ fn runtime(id: &str) -> Runtime { fn load_spec(id: &str) -> std::result::Result, String> { let (id, _, para_id) = extract_parachain_id(id); Ok(match id { - // - Defaul-like + // - Default-like "staging" => Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()), "tick" => Box::new(GenericChainSpec::from_json_bytes( diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index ddf595ca70c1d675aa66a94daf21036cc8a3295e..e9bb5947522b2b092e7a7b05729d795bc1d85721 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -69,13 +69,11 @@ use substrate_prometheus_endpoint::Registry; use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] -type HostFunctions = - (sp_io::SubstrateHostFunctions, cumulus_client_service::storage_proof_size::HostFunctions); +type HostFunctions = cumulus_client_service::ParachainHostFunctions; #[cfg(feature = "runtime-benchmarks")] type HostFunctions = ( - sp_io::SubstrateHostFunctions, - cumulus_client_service::storage_proof_size::HostFunctions, + cumulus_client_service::ParachainHostFunctions, 
frame_benchmarking::benchmarking::HostFunctions, ); diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 32c5054f359c4102a0c0a5ffdcb128bdc1ffd725..62c3f6751917ad4aaaaec6509c852a5cfd75d7f8 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate sp-api = { path = "../../../substrate/primitives/api", default-features = false } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index f434305a0ce013ae048087bf9a75413efacc1fff..fcf4c93bc2f01dc9e60f5ad2d74e302f5247b955 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -10,9 +10,9 @@ license = "Apache-2.0" workspace = true [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate sp-core = { path = "../../../substrate/primitives/core", default-features = false } diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 4835fb5192b88165c1e7912862492086ddd0853e..6dbf7904bf796e81415b967637d9770356142603 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } @@ -20,9 +20,9 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } -docify = "0.2.7" +cumulus-primitives-core = { path = "../core", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../proof-size-hostfunction", default-features = false } +docify = "0.2.8" [dev-dependencies] sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index 5dddc92e395574dbb36538f3799c0d21b22a3939..c09c12d7a0abf8ac3c64974390d47880af6ec6e9 100644 --- 
a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -421,7 +421,7 @@ mod tests { .unwrap(); assert_eq!(pre, Some(100)); - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); assert_ok!(StorageWeightReclaim::::post_dispatch( @@ -456,7 +456,7 @@ mod tests { .unwrap(); assert_eq!(pre, Some(100)); - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); assert_ok!(StorageWeightReclaim::::post_dispatch( @@ -500,7 +500,7 @@ mod tests { &Ok(()) )); // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); @@ -536,7 +536,7 @@ mod tests { &Ok(()) )); // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); diff --git a/cumulus/primitives/timestamp/src/lib.rs b/cumulus/primitives/timestamp/src/lib.rs index 535c4a2a7268b9df053021d824fd0fbb0599845b..e6aba6d0bb74043c134400387d84429cf0a712d1 100644 --- a/cumulus/primitives/timestamp/src/lib.rs +++ b/cumulus/primitives/timestamp/src/lib.rs @@ -22,7 +22,7 @@ //! access to any clock from the runtime the timestamp is always passed as an inherent into the //! runtime. To check this inherent when validating the block, we will use the relay chain slot. As //! the relay chain slot is derived from a timestamp, we can easily convert it back to a timestamp -//! by muliplying it with the slot duration. By comparing the relay chain slot derived timestamp +//! by multiplying it with the slot duration. By comparing the relay chain slot derived timestamp //! with the timestamp we can ensure that the parachain timestamp is reasonable. 
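The comment fixed just above is the whole validation trick in one sentence; as a worked example (an illustrative helper, not code from this crate):

```rust
/// Relay-chain slots are derived from Unix time, so multiplying a slot by the
/// slot duration recovers a millisecond timestamp: with 6-second slots,
/// slot 287_000_000 maps back to 1_722_000_000_000 ms. The parachain's
/// timestamp inherent can then be checked against this value for plausibility.
fn relay_slot_to_timestamp_ms(slot: u64, slot_duration_ms: u64) -> u64 {
    slot.saturating_mul(slot_duration_ms)
}
```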
#![cfg_attr(not(feature = "std"), no_std)] diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index abc391bdcb8ed6ac3682c761e6a0a8d2d0e7f14b..d5d411356dc385948f31730ba65dcd26074d0336 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -141,7 +141,7 @@ impl< ) -> Result { log::trace!(target: "xcm::weight", "TakeFirstAssetTrader::buy_weight weight: {:?}, payment: {:?}, context: {:?}", weight, payment, context); - // Make sure we dont enter twice + // Make sure we don't enter twice if self.0.is_some() { return Err(XcmError::NotWithdrawable) } @@ -176,7 +176,7 @@ impl< // Convert to the same kind of asset, with the required fungible balance let required = first.id.clone().into_asset(asset_balance.into()); - // Substract payment + // Subtract payment let unused = payment.checked_sub(required.clone()).map_err(|_| XcmError::TooExpensive)?; // record weight and asset @@ -203,7 +203,7 @@ impl< // Calculate asset_balance // This read should have already be cached in buy_weight - let (asset_balance, outstanding_minus_substracted) = + let (asset_balance, outstanding_minus_subtracted) = FeeCharger::charge_weight_in_fungibles(local_asset_id, weight).ok().map( |asset_balance| { // Require at least a drop of minimum_balance @@ -221,16 +221,15 @@ impl< )?; // Convert balances into u128 - let outstanding_minus_substracted: u128 = - outstanding_minus_substracted.saturated_into(); + let outstanding_minus_subtracted: u128 = outstanding_minus_subtracted.saturated_into(); let asset_balance: u128 = asset_balance.saturated_into(); - // Construct outstanding_concrete_asset with the same location id and substracted + // Construct outstanding_concrete_asset with the same location id and subtracted // balance let outstanding_concrete_asset: Asset = - (id.clone(), outstanding_minus_substracted).into(); + (id.clone(), outstanding_minus_subtracted).into(); - // Substract from existing weight and balance + // Subtract from existing weight and balance weight_outstanding = weight_outstanding.saturating_sub(weight); // Override AssetTraderRefunder @@ -263,9 +262,10 @@ impl< } } -/// XCM fee depositor to which we implement the TakeRevenue trait -/// It receives a Transact implemented argument, a 32 byte convertible acocuntId, and the fee -/// receiver account FungiblesMutateAdapter should be identical to that implemented by WithdrawAsset +/// XCM fee depositor to which we implement the `TakeRevenue` trait. +/// It receives a `Transact` implemented argument and a 32 byte convertible `AccountId`, and the fee +/// receiver account's `FungiblesMutateAdapter` should be identical to that implemented by +/// `WithdrawAsset`. pub struct XcmFeesTo32ByteAccount( PhantomData<(FungiblesMutateAdapter, AccountId, ReceiverAccount)>, ); @@ -763,7 +763,8 @@ mod test_trader { /// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the /// parent relay chain. Deposits existential deposit for origin (if needed). /// Deposits estimated fee to the origin account (if needed). -/// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if neeeded). +/// Allows triggering of additional logic for a specific `ParaId` (e.g. to open an HRMP channel) if +/// needed. 
#[cfg(feature = "runtime-benchmarks")] pub struct ToParentDeliveryHelper( sp_std::marker::PhantomData<(XcmConfig, ExistentialDeposit, PriceForDelivery)>, diff --git a/cumulus/scripts/scale_encode_genesis/index.js b/cumulus/scripts/scale_encode_genesis/index.js index f612e6da79dd51452f46bcd0e626b5648b77f2e4..c6600e406361178524998b1a212e25fb97619957 100644 --- a/cumulus/scripts/scale_encode_genesis/index.js +++ b/cumulus/scripts/scale_encode_genesis/index.js @@ -19,14 +19,14 @@ async function connect(endpoint, types = {}) { } if (!process.argv[2] || !process.argv[3]) { - console.log("usage: node generate_keys [rpc enpoint]"); + console.log("usage: node generate_keys [rpc endpoint]"); exit(); } const input = process.argv[2]; const output = process.argv[3]; // default to localhost and the default Substrate port -const rpcEnpoint = process.argv[4] || "ws://localhost:9944"; +const rpcEndpoint = process.argv[4] || "ws://localhost:9944"; console.log("Processing", input, output); fs.readFile(input, "utf8", (err, data) => { @@ -38,8 +38,8 @@ fs.readFile(input, "utf8", (err, data) => { const genesis = JSON.parse(data); console.log("loaded genesis, length = ", genesis.length); - console.log(`Connecting to RPC endpoint: ${rpcEnpoint}`); - connect(rpcEnpoint) + console.log(`Connecting to RPC endpoint: ${rpcEndpoint}`); + connect(rpcEndpoint) .then((api) => { console.log('Connected'); const setStorage = api.tx.system.setStorage(genesis); diff --git a/cumulus/scripts/temp_parachain_types.json b/cumulus/scripts/temp_parachain_types.json index f550a6774450deb8abc9651d15a08b1b34b28c48..2509d32be9fd2cab453c5fd440b9879fca317c17 100644 --- a/cumulus/scripts/temp_parachain_types.json +++ b/cumulus/scripts/temp_parachain_types.json @@ -54,7 +54,7 @@ "validity_votes": "Vec", "validator_indices": "BitVec" }, - "CandidatePendingAvailablility": { + "CandidatePendingAvailability": { "core": "u32", "descriptor": "CandidateDescriptor", "availability_votes": "BitVec", diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 449a8b819bc074e0c891d99d6fa2f42480f56ce6..b430b118fa1f1e514fdd049e1c1bd2b202802886 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -10,7 +10,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate frame-executive = { path = "../../../substrate/frame/executive", default-features = false } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 8e3569b02a1594c7043e7d6aebee50e7feaa6a5b..5127b63f27155835f9f8bcd4ec50a4b47f9cc332 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -139,7 +139,7 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. /// This is used to limit the maximal weight of a single extrinsic. 
const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 45e21432f5b82e58d208088842c813986c94dc88..040fb479f6e8df46cdc17eefc1f3d2cdcf282656 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -13,7 +13,7 @@ name = "test-parachain" path = "src/main.rs" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } criterion = { version = "0.5.1", features = ["async_tokio"] } @@ -88,7 +88,6 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp" } futures = "0.3.28" portpicker = "0.1.1" rococo-parachain-runtime = { path = "../../parachains/runtimes/testing/rococo-parachain" } -pallet-im-online = { path = "../../../substrate/frame/im-online" } sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } cumulus-test-client = { path = "../client" } @@ -106,7 +105,6 @@ runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "cumulus-test-client/runtime-benchmarks", "frame-system/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index a614863803e09e89ff9671dd6916ccbfeb657278..d95f733969bce01b16229994ad4f88f6fea312e1 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -47,7 +47,7 @@ fn create_extrinsics( src_accounts: &[sr25519::Pair], dst_accounts: &[sr25519::Pair], ) -> (usize, Vec) { - // Add as many tranfer extrinsics as possible into a single block. + // Add as many transfer extrinsics as possible into a single block. let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(client.chain_info().best_hash) .with_parent_block_number(client.chain_info().best_number) diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 3554a383f219e7465f5df875bdf6463b93119f3b..3af3901d175e5a113f137741a5de2ca11544c238 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -735,7 +735,7 @@ pub fn node_config( tokio_handle: tokio::runtime::Handle, key: Sr25519Keyring, nodes: Vec, - nodes_exlusive: bool, + nodes_exclusive: bool, para_id: ParaId, is_collator: bool, endowed_accounts: Vec, @@ -759,7 +759,7 @@ pub fn node_config( None, ); - if nodes_exlusive { + if nodes_exclusive { network_config.default_peers_set.reserved_nodes = nodes; network_config.default_peers_set.non_reserved_mode = sc_network::config::NonReservedPeerMode::Deny; diff --git a/docker/dockerfiles/binary_injected.Dockerfile b/docker/dockerfiles/binary_injected.Dockerfile index ac1fd5317c67cb4cbef1044a6e2e3379f8ff4662..c8930bd83f0274990aff281e84d75b7add9d1289 100644 --- a/docker/dockerfiles/binary_injected.Dockerfile +++ b/docker/dockerfiles/binary_injected.Dockerfile @@ -2,7 +2,7 @@ FROM docker.io/parity/base-bin # This file allows building a Generic container image # based on one or multiple pre-built Linux binaries. -# Some defaults are set to polkadot but all can be overriden. 
+# Some defaults are set to polkadot but all can be overridden. SHELL ["/bin/bash", "-c"] diff --git a/docker/scripts/build-injected.sh b/docker/scripts/build-injected.sh index f415cf43c0eeefecbe0fe2a0649bbe9df0e2f960..749d0fa335cc1248fcde1770046fc7c1e31e9fbe 100755 --- a/docker/scripts/build-injected.sh +++ b/docker/scripts/build-injected.sh @@ -20,7 +20,7 @@ PROJECT_ROOT=${PROJECT_ROOT:-$(git rev-parse --show-toplevel)} DOCKERFILE=${DOCKERFILE:-docker/dockerfiles/binary_injected.Dockerfile} VERSION_TOML=$(grep "^version " $PROJECT_ROOT/Cargo.toml | grep -oE "([0-9\.]+-?[0-9]+)") -#n The following VAR have default that can be overriden +#n The following VAR have default that can be overridden DOCKER_OWNER=${DOCKER_OWNER:-parity} # We may get 1..n binaries, comma separated diff --git a/docs/contributor/container.md b/docs/contributor/container.md index dd44b31bfe96994ed54e3eca8610e7d8778f5261..9c542f411c81f4237e69ffaf63d4686eeac204e4 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -16,7 +16,7 @@ Parity builds and publishes a container image that can be found as `docker.io/pa ## Parity CI image Parity maintains and uses internally a generic "CI" image that can be used as a base to build binaries: [Parity CI -container image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-linux): +container image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-unified): The command below allows building a Linux binary without having to even install Rust or any dependency locally: @@ -24,14 +24,11 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - paritytech/ci-linux:production \ + paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240222 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` -If you want to reproduce other steps of CI process you can use the following -[guide](https://github.com/paritytech/scripts#gitlab-ci-for-building-docker-images). - ## Injected image Injecting a binary inside a base image is the quickest option to get a working container image. This only works if you diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 3f84d45640f9e4c0e6b8ca4eb8c7cf4e55fd87b4..64b23866f0cd85715404555e5ad67e8727767c93 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-sdk-docs" -description = "The one stop shop for developers of the polakdot-sdk" +description = "The one stop shop for developers of the polkadot-sdk" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "paritytech.github.io" repository.workspace = true @@ -27,7 +27,7 @@ pallet-example-offchain-worker = { path = "../../substrate/frame/examples/offcha # How we build docs in rust-docs simple-mermaid = "0.1.1" -docify = "0.2.7" +docify = "0.2.8" # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. 
node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" } @@ -66,6 +66,7 @@ pallet-aura = { path = "../../substrate/frame/aura", default-features = false } pallet-timestamp = { path = "../../substrate/frame/timestamp" } pallet-balances = { path = "../../substrate/frame/balances" } pallet-assets = { path = "../../substrate/frame/assets" } +pallet-preimage = { path = "../../substrate/frame/preimage" } pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } pallet-utility = { path = "../../substrate/frame/utility" } pallet-multisig = { path = "../../substrate/frame/multisig" } @@ -73,6 +74,8 @@ pallet-proxy = { path = "../../substrate/frame/proxy" } pallet-authorship = { path = "../../substrate/frame/authorship" } pallet-collective = { path = "../../substrate/frame/collective" } pallet-democracy = { path = "../../substrate/frame/democracy" } +pallet-uniques = { path = "../../substrate/frame/uniques" } +pallet-nfts = { path = "../../substrate/frame/nfts" } pallet-scheduler = { path = "../../substrate/frame/scheduler" } # Primitives diff --git a/docs/sdk/src/polkadot_sdk/substrate.rs b/docs/sdk/src/polkadot_sdk/substrate.rs index 5021c55e581f3d9fcd06f8578998ab60db0abb71..69d74d86db1b585692d9df8ae9db08ae454ca9f9 100644 --- a/docs/sdk/src/polkadot_sdk/substrate.rs +++ b/docs/sdk/src/polkadot_sdk/substrate.rs @@ -99,7 +99,7 @@ //! demonstration. //! * [`chain_spec_builder`]: Utility to build more detailed chain-specs for the aforementioned //! node. Other projects typically contain a `build-spec` subcommand that does the same. -//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node-template): +//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node): //! a template node that contains a minimal set of features and can act as a starting point of a //! project. //! * [`subkey`]: Substrate's key management utility. diff --git a/docs/sdk/src/polkadot_sdk/templates.rs b/docs/sdk/src/polkadot_sdk/templates.rs index f60c75b8f219334ccc52a9ea8441d21a2dbede8d..4bf0e839c798fc55dd1fa0696c6bfe8411c32bc8 100644 --- a/docs/sdk/src/polkadot_sdk/templates.rs +++ b/docs/sdk/src/polkadot_sdk/templates.rs @@ -22,9 +22,8 @@ //! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template): A //! parachain template for launching EVM-compatible parachains. //! -//! [`substrate-node-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/node-template/ -//! [`substrate-minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/minimal/ -//! [`cumulus-parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/cumulus/parachain-template/ +//! [`minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/minimal/ +//! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/templates/parachain/ // TODO: in general, we need to make a deliberate choice here of moving a few key templates to this // repo (nothing stays in `substrate-developer-hub`) and the everything else should be community diff --git a/docs/sdk/src/reference_docs/frame_currency.rs b/docs/sdk/src/reference_docs/frame_currency.rs deleted file mode 100644 index 6987d51aec824d370fb4706ae7d5b942a2667a64..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/frame_currency.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! FRAME Currency Abstractions and Traits -//! -//! 
Notes: -//! -//! - History, `Currency` trait. -//! - `Hold` and `Freeze` with diagram. -//! - `HoldReason` and `FreezeReason` -//! - This footgun: diff --git a/docs/sdk/src/reference_docs/frame_pallet_coupling.rs b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs index cca7f9feb3f40ca9f933e9e89ed8739fa2a79b45..be464bbbf8359c77fa549ab80bcb0008244488f9 100644 --- a/docs/sdk/src/reference_docs/frame_pallet_coupling.rs +++ b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs @@ -143,7 +143,7 @@ //! For example, all pallets in `polkadot-sdk` that needed to work with currencies could have been //! tightly coupled with [`pallet_balances`]. But, `polkadot-sdk` also provides [`pallet_assets`] //! (and more implementations by the community), therefore all pallets use traits to loosely couple -//! with balances or assets pallet. More on this in [`crate::reference_docs::frame_currency`]. +//! with balances or assets pallet. More on this in [`crate::reference_docs::frame_tokens`]. //! //! ## Further References //! diff --git a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs index 7d870b432218e9e9a06c25f4f19e1f07ca72c6c6..f9a69b892a3152854cdf7b5f97d003e500f64f07 100644 --- a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs +++ b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs @@ -16,7 +16,7 @@ //! in a process called "Runtime Upgrades". //! //! Forkless runtime upgrades are a defining feature of the Substrate framework. Updating the -//! runtime logic without forking the code base enables your blockchain to seemlessly evolve +//! runtime logic without forking the code base enables your blockchain to seamlessly evolve //! over time in a deterministic, rules-based manner. It also removes ambiguity for node operators //! and other participants in the network about what is the canonical runtime. //! @@ -24,7 +24,7 @@ //! //! ## Performing a Runtime Upgrade //! -//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necesarry permissions +//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necessary permissions //! (usually via governance) changes the `:code` storage. Usually, this is performed via a call to //! [`set_code`] (or [`set_code_without_checks`]) with the desired new runtime blob, scheduled //! using [`pallet_scheduler`]. @@ -41,7 +41,7 @@ //! //! The typical use case of a migration is to 'migrate' pallet storage from one layout to another, //! for example when the encoding of a storage item is changed. However, they can also execute -//! arbitary logic such as: +//! arbitrary logic such as: //! //! - Calling arbitrary pallet methods //! - Mutating arbitrary on-chain state @@ -88,7 +88,7 @@ //! //! Prior to deploying migrations, it is critical to perform additional checks to ensure that when //! run in our real runtime they will not brick the chain due to: -//! - Panicing +//! - Panicking //! - Touching too many storage keys and resulting in an excessively large PoV //! - Taking too long to execute //! @@ -131,7 +131,6 @@ //! //! TODO: Link to multi block migration example/s once PR is merged (). //! -//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion //! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade //! [`StorageVersion`]: frame_support::traits::StorageVersion //! 
[`set_code`]: frame_system::Call::set_code diff --git a/docs/sdk/src/reference_docs/frame_tokens.rs b/docs/sdk/src/reference_docs/frame_tokens.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9d34e2091d897918123b4d0e374bf39a292f207 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_tokens.rs @@ -0,0 +1,131 @@ +// This file is part of polkadot-sdk. +// +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # FRAME Tokens +//! +//! This reference doc serves as a high-level overview of the token-related logic in FRAME, and +//! how to properly apply it to your use case. +//! +//! On completion of reading this doc, you should have a good understanding of: +//! - The distinction between token traits and trait implementations in FRAME, and why this +//! distinction is helpful +//! - Token-related traits available in FRAME +//! - Token-related trait implementations in FRAME +//! - How to choose the right trait or trait implementation for your use case +//! - Where to go next +//! +//! ## Getting Started +//! +//! The most ubiquitous way to add a token to a FRAME runtime is [`pallet_balances`]. Read +//! more about pallets [here](crate::polkadot_sdk::frame_runtime#pallets). +//! +//! You may then write custom pallets that interact with [`pallet_balances`]. The fastest way to +//! get started with that is by +//! [tightly coupling](crate::reference_docs::frame_pallet_coupling#tight-coupling-pallets) your +//! custom pallet to [`pallet_balances`]. +//! +//! However, to keep pallets flexible and modular, it is often preferred to +//! [loosely couple](crate::reference_docs::frame_pallet_coupling#loosely--coupling-pallets). +//! +//! To achieve loose coupling, +//! we separate token logic into traits and trait implementations. +//! +//! ## Traits and Trait Implementations +//! +//! Broadly speaking, token logic in FRAME can be divided into two categories: traits and +//! trait implementations. +//! +//! **Traits** define common interfaces that types of tokens should implement. For example, the +//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`) trait specifies an interface +//! for *inspecting* token state such as the total issuance of the token, the balance of individual +//! accounts, etc. +//! +//! **Trait implementations** are concrete implementations of these traits. For example, one of the +//! many traits [`pallet_balances`] implements is +//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`)*. It provides the concrete way +//! of inspecting the total issuance, balance of accounts, etc. There can be many implementations of +//! the same trait. +//! +//! The distinction between traits and trait implementations is helpful because it allows pallets +//! and other logic to be generic over their dependencies, avoiding tight coupling. +//! +//! To illustrate this with an example, let's consider [`pallet_preimage`]. This pallet takes a
deposit in exchange for storing a preimage for later use. A naive implementation of the +//! pallet may use [`pallet_balances`] in a tightly coupled manner, directly calling methods +//! on the pallet to reserve and unreserve deposits. This approach works well +//! until someone has a use case requiring that an asset from a different pallet such as +//! [`pallet_assets`] is used for the deposit. Rather than tightly couple [`pallet_preimage`] to +//! [`pallet_balances`], [`pallet_assets`], and every other token-handling pallet a user +//! could possibly specify, [`pallet_preimage`] does not specify a concrete pallet as a dependency +//! but instead accepts any dependency which implements the +//! [`currency::ReservableCurrency`](`frame_support::traits::tokens::currency::ReservableCurrency`) +//! trait, namely via its [`Config::Currency`](`pallet_preimage::pallet::Config::Currency`) +//! associated type. This allows [`pallet_preimage`] to support any arbitrary pallet implementing +//! this trait, without needing any knowledge of what those pallets may be or requiring changes to +//! support new pallets which may be written in the future. +//! +//! Read more about coupling, and the benefits of loose coupling +//! [here](crate::reference_docs::frame_pallet_coupling). +//! +//! ##### *Rust Advanced Tip +//! +//! The knowledge that [`pallet_balances`] implements +//! [`fungible::Inspect`](`frame_support::traits::fungible::Inspect`) is not some arcane knowledge +//! that you have to know by heart or memorize. One can simply look at the list of the implementors +//! of any trait in the Rust Doc to find all implementors (e.g. +//! ), +//! or use the `rust-analyzer` `Implementations` action. +//! +//! ## Fungible Token Traits in FRAME +//! +//! The [`fungible`](`frame_support::traits::fungible`) module contains the latest set of FRAME +//! fungible token traits, and is recommended for all new logic requiring a fungible token. +//! See the module documentation for more info about these fungible traits. +//! +//! [`fungibles`](`frame_support::traits::fungibles`) provides very similar functionality to +//! [`fungible`](`frame_support::traits::fungible`), except it supports managing multiple tokens. +//! +//! You may notice the trait [`Currency`](`frame_support::traits::Currency`) with similar +//! functionality is also used in the codebase; however, this trait is deprecated and existing logic +//! is in the process of being migrated to [`fungible`](`frame_support::traits::fungible`) ([tracking issue](https://github.com/paritytech/polkadot-sdk/issues/226)). +//! +//! ## Fungible Token Trait Implementations in FRAME +//! +//! [`pallet_balances`] implements [`fungible`](`frame_support::traits::fungible`), and is the most +//! commonly used fungible implementation in FRAME. Most of the time, it is used to manage the +//! native token of the blockchain network. +//! +//! [`pallet_assets`] implements [`fungibles`](`frame_support::traits::fungibles`), and is another +//! popular fungible token implementation. It supports the creation and management of multiple +//! assets in a single pallet, making it a good choice when a network requires more assets in +//! addition to its native token. +//! +//! ## Non-Fungible Tokens in FRAME +//! +//! [`pallet_nfts`] is recommended for all NFT use cases in FRAME. +//! See the pallet documentation for more info about it. +//! +//! [`pallet_uniques`] is deprecated and should not be used. +//! +//! +//! # What Next? +//! +//!
- If you are interested in implementing a single fungible token, continue reading the +//! [`fungible`](`frame_support::traits::fungible`) and [`pallet_balances`] docs. +//! - If you are interested in implementing a set of fungible tokens, continue reading the +//! [`fungibles`](`frame_support::traits::fungibles`) trait and [`pallet_assets`] docs. +//! - If you are interested in implementing an NFT, continue reading the [`pallet_nfts`] docs. diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index a0d8d05b44926c4b2ca9259d0709a8a7dee2fb76..145df8844f26e5b58d737a4290035e3886bf63fe 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -65,9 +65,6 @@ pub mod metadata; /// Learn about how frame-system handles `account-ids`, nonces, consumers and providers. pub mod frame_system_accounts; -/// Learn about the currency-related abstractions provided in FRAME. -pub mod frame_currency; - /// Advice for configuring your development environment for Substrate development. pub mod development_environment_advice; @@ -75,6 +72,9 @@ pub mod development_environment_advice; // TODO: @shawntabrizi @ggwpez https://github.com/paritytech/polkadot-sdk-docs/issues/50 pub mod frame_benchmarking_weight; +/// Learn about the token-related logic in FRAME and how to apply it to your use case. +pub mod frame_tokens; + /// Learn about chain specification file and the genesis state of the blockchain. // TODO: @michalkucharczyk https://github.com/paritytech/polkadot-sdk-docs/issues/51 pub mod chain_spec_genesis; diff --git a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs index 099512cf4ee1254b9b1e522395b9cff9783d3e3d..379b0c11b2ad077eb320f72225e513d1b9ed6d67 100644 --- a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs +++ b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs @@ -117,7 +117,7 @@ //! - **Contract Code Updates**: Once deployed, although typically immutable, Smart Contracts can be //! upgraded, but lack of migration logic. The [pallet_contracts](../../../pallet_contracts/index.html) //! allows for contracts to be upgraded by exposing the `set_code` dispatchable. More details on this -//! can be found in [Ink! documentation on upgradeable contracts](https://use.ink/5.x/basics/upgradeable-contracts). +//! can be found in [Ink! documentation on upgradeable contracts](https://use.ink/basics/upgradeable-contracts). //! - **Isolated Impact**: Upgrades or changes to a smart contract generally impact only that //! contract and its users, unlike Runtime upgrades that have a network-wide effect. //! 
- **Simplicity and Rapid Development**: The development cycle for Smart Contracts is usually diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index b0d71a18eaa13f8e7d188ca6420effa9dde4bb6b..659edcb041c35625b6f7c6ee7841eab27c4c1175 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -45,8 +45,8 @@ tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_suppo assert_cmd = "2.0.4" nix = { version = "0.26.1", features = ["signal"] } tempfile = "3.2.0" -tokio = "1.24.2" -substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client/" } +tokio = "1.37" +substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client" } polkadot-core-primitives = { path = "core-primitives" } [build-dependencies] diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index f57efa7ba4361ecdcfa40eb1d63dabb4a0343767..b0c22c5a97fe8eea17a72b6079e560b50c95039f 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -22,7 +22,7 @@ cfg-if = "1.0" clap = { version = "4.5.3", features = ["derive"], optional = true } log = { workspace = true, default-features = true } thiserror = { workspace = true } -futures = "0.3.21" +futures = "0.3.30" pyro = { package = "pyroscope", version = "0.5.3", optional = true } pyroscope_pprofrs = { version = "0.2", optional = true } diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index e1bc2309a942a8255dd5c99a0db0725aa3b1b37d..74e190444693183de13c1961e859cf61ab04156b 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -122,7 +122,7 @@ pub struct RunCmd { /// Overseer message capacity override. /// - /// **Dangerous!** Do not touch unless explicitly adviced to. + /// **Dangerous!** Do not touch unless explicitly advised to. #[arg(long)] pub overseer_channel_capacity_override: Option, diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index d3aef89cb74d01cfe3293c500d981a38b09b6a8d..8dfa0b87328b1af0bc4cbc1bcd677919acaaf892 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true sp-core = { path = "../../substrate/primitives/core", default-features = false } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } [features] diff --git a/polkadot/grafana/README.md b/polkadot/grafana/README.md index 7350001bfa1fcebe367799cc36426522c4cf4f97..e909fdd29a757afba3ba6c76ade6466acffc7bda 100644 --- a/polkadot/grafana/README.md +++ b/polkadot/grafana/README.md @@ -8,7 +8,7 @@ monitor the liveliness and performance of a network and its validators. # How does it work ? Just import the dashboard JSON files from this folder in your Grafana installation. All dashboards are grouped in -folder percategory (like for example `parachains`). The files have been created by Grafana export functionality and +folder per category (like for example `parachains`). The files have been created by Grafana export functionality and follow the data model specified [here](https://grafana.com/docs/grafana/latest/dashboards/json-model/). 
We aim to keep the dashboards here in sync with the implementation, except dashboards for development and diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index f72af87c15edf1f1147018de75b79079c8755b41..ebc53a9e01bbe5abc6ba18e500ad3bafcf35ead3 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -10,7 +10,7 @@ description = "Collator-side subsystem that handles incoming candidate submissio workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../gum" } polkadot-erasure-coding = { path = "../../erasure-coding" } polkadot-node-primitives = { path = "../primitives" } diff --git a/polkadot/node/collation-generation/src/error.rs b/polkadot/node/collation-generation/src/error.rs index ac5db6cd7f285b266da2fa2204d92a1cb6331b1c..f04e3c4f20b41f3a0934c65c0563d33edf45487a 100644 --- a/polkadot/node/collation-generation/src/error.rs +++ b/polkadot/node/collation-generation/src/error.rs @@ -27,7 +27,11 @@ pub enum Error { #[error(transparent)] Util(#[from] polkadot_node_subsystem_util::Error), #[error(transparent)] + UtilRuntime(#[from] polkadot_node_subsystem_util::runtime::Error), + #[error(transparent)] Erasure(#[from] polkadot_erasure_coding::Error), + #[error("Parachain backing state not available in runtime.")] + MissingParaBackingState, } pub type Result = std::result::Result; diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 3b1a8f5ff2305c022f56aac6bddec1c00599cbd3..60ea1cf5ff48c49f4d6e1de832cd5cdb05d70392 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -38,14 +38,14 @@ use polkadot_node_primitives::{ SubmitCollationParams, }; use polkadot_node_subsystem::{ - messages::{CollationGenerationMessage, CollatorProtocolMessage, RuntimeApiRequest}, + messages::{CollationGenerationMessage, CollatorProtocolMessage}, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem, SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ - has_required_runtime, request_async_backing_params, request_availability_cores, - request_claim_queue, request_persisted_validation_data, request_validation_code, - request_validation_code_hash, request_validators, + request_async_backing_params, request_availability_cores, request_para_backing_state, + request_persisted_validation_data, request_validation_code, request_validation_code_hash, + request_validators, vstaging::fetch_claim_queue, }; use polkadot_primitives::{ collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, @@ -53,10 +53,7 @@ use polkadot_primitives::{ PersistedValidationData, ScheduledCore, ValidationCodeHash, }; use sp_core::crypto::Pair; -use std::{ - collections::{BTreeMap, VecDeque}, - sync::Arc, -}; +use std::sync::Arc; mod error; @@ -212,6 +209,7 @@ async fn handle_new_activations( if config.collator.is_none() { return Ok(()) } + let para_id = config.para_id; let _overall_timer = metrics.time_new_activations(); @@ -225,40 +223,38 @@ async fn handle_new_activations( ); let availability_cores = availability_cores??; - let n_validators = validators??.len(); let async_backing_params = async_backing_params?.ok(); - let maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent).await?; + let n_validators = validators??.len(); + let 
maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent) + .await + .map_err(crate::error::Error::UtilRuntime)?; - for (core_idx, core) in availability_cores.into_iter().enumerate() { - let _availability_core_timer = metrics.time_new_activations_availability_core(); + // The loop below will fill in the cores that the para is allowed to build on. + let mut cores_to_build_on = Vec::new(); - let (scheduled_core, assumption) = match core { - CoreState::Scheduled(scheduled_core) => - (scheduled_core, OccupiedCoreAssumption::Free), + for (core_idx, core) in availability_cores.into_iter().enumerate() { + let scheduled_core = match core { + CoreState::Scheduled(scheduled_core) => scheduled_core, CoreState::Occupied(occupied_core) => match async_backing_params { Some(params) if params.max_candidate_depth >= 1 => { // maximum candidate depth when building on top of a block // pending availability is necessarily 1 - the depth of the // pending block is 0 so the child has depth 1. - // TODO [now]: this assumes that next up == current. - // in practice we should only set `OccupiedCoreAssumption::Included` - // when the candidate occupying the core is also of the same para. + // Use the claim queue if available, or fall back to `next_up_on_available` let res = match maybe_claim_queue { Some(ref claim_queue) => { - // read what's in the claim queue for this core - fetch_next_scheduled_on_core( - claim_queue, - CoreIndex(core_idx as u32), - ) + // read what's in the claim queue for this core at depth 0. + claim_queue + .get_claim_for(CoreIndex(core_idx as u32), 0) + .map(|para_id| ScheduledCore { para_id, collator: None }) }, None => { // Runtime doesn't support claim queue runtime api. Fallback to // `next_up_on_available` occupied_core.next_up_on_available }, - } - .map(|scheduled| (scheduled, OccupiedCoreAssumption::Included)); + }; match res { Some(res) => res, @@ -279,7 +275,7 @@ async fn handle_new_activations( gum::trace!( target: LOG_TARGET, core_idx = %core_idx, - "core is free. Keep going.", + "core is not assigned to any para. Keep going.", ); continue }, @@ -294,64 +290,90 @@ async fn handle_new_activations( their_para = %scheduled_core.para_id, "core is not assigned to our para. Keep going.", ); - continue + } else { + // Accumulate cores for building collation(s) outside the loop. + cores_to_build_on.push(CoreIndex(core_idx as u32)); } + } - // we get validation data and validation code synchronously for each core instead of - // within the subtask loop, because we have only a single mutable handle to the - // context, so the work can't really be distributed + // Skip to the next relay parent if there is no core assigned to us. + if cores_to_build_on.is_empty() { + continue + } - let validation_data = match request_persisted_validation_data( - relay_parent, - scheduled_core.para_id, - assumption, - ctx.sender(), - ) - .await - .await?? - { - Some(v) => v, - None => { - gum::trace!( - target: LOG_TARGET, - core_idx = %core_idx, - relay_parent = ?relay_parent, - our_para = %config.para_id, - their_para = %scheduled_core.para_id, - "validation data is not available", - ); - continue - }, - }; + let para_backing_state = + request_para_backing_state(relay_parent, config.para_id, ctx.sender()) + .await + .await?? + .ok_or(crate::error::Error::MissingParaBackingState)?; + + // We are being very optimistic here: one of the cores could still be pending availability for + // some more blocks, or even time out. + // For the timeout assumption the collator can't really know, because it doesn't receive bitfield + // gossip.
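+ // If any of the para's candidates are pending availability we optimistically assume they + // will be included and pick `OccupiedCoreAssumption::Included`; otherwise we build directly + // on the para's current head with `OccupiedCoreAssumption::Free`.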
+ let assumption = if para_backing_state.pending_availability.is_empty() { + OccupiedCoreAssumption::Free + } else { + OccupiedCoreAssumption::Included + }; + + gum::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + our_para = %config.para_id, + ?assumption, + "Occupied core(s) assumption", + ); - let validation_code_hash = match obtain_validation_code_hash_with_assumption( - relay_parent, - scheduled_core.para_id, - assumption, - ctx.sender(), - ) - .await? - { - Some(v) => v, - None => { - gum::trace!( - target: LOG_TARGET, - core_idx = %core_idx, - relay_parent = ?relay_parent, - our_para = %config.para_id, - their_para = %scheduled_core.para_id, - "validation code hash is not found.", - ); - continue - }, - }; + let mut validation_data = match request_persisted_validation_data( + relay_parent, + config.para_id, + assumption, + ctx.sender(), + ) + .await + .await?? + { + Some(v) => v, + None => { + gum::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + our_para = %config.para_id, + "validation data is not available", + ); + continue + }, + }; + + let validation_code_hash = match obtain_validation_code_hash_with_assumption( + relay_parent, + config.para_id, + assumption, + ctx.sender(), + ) + .await? + { + Some(v) => v, + None => { + gum::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + our_para = %config.para_id, + "validation code hash is not found.", + ); + continue + }, + }; + + let task_config = config.clone(); + let metrics = metrics.clone(); + let mut task_sender = ctx.sender().clone(); - let task_config = config.clone(); - let metrics = metrics.clone(); - let mut task_sender = ctx.sender().clone(); - ctx.spawn( - "collation-builder", - Box::pin(async move { + ctx.spawn( + "chained-collation-builder", + Box::pin(async move { + for core_index in cores_to_build_on { let collator_fn = match task_config.collator.as_ref() { Some(x) => x, None => return, @@ -363,21 +385,23 @@ async fn handle_new_activations( None => { gum::debug!( target: LOG_TARGET, - para_id = %scheduled_core.para_id, + ?para_id, "collator returned no collation on collate", ); return }, }; + let parent_head = collation.head_data.clone(); construct_and_distribute_receipt( PreparedCollation { collation, - para_id: scheduled_core.para_id, + para_id, relay_parent, - validation_data, + validation_data: validation_data.clone(), validation_code_hash, n_validators, + core_index, }, task_config.key.clone(), &mut task_sender, @@ -385,9 +409,13 @@ async fn handle_new_activations( &metrics, ) .await; - }), - )?; - } + + // Chain the collations. All else stays the same as we build the chained + // collation on the same relay parent.
+ validation_data.parent_head = parent_head; + } + }), + )?; } Ok(()) @@ -408,6 +436,7 @@ async fn handle_submit_collation( parent_head, validation_code_hash, result_sender, + core_index, } = params; let validators = request_validators(relay_parent, ctx.sender()).await.await??; @@ -444,6 +473,7 @@ async fn handle_submit_collation( validation_data, validation_code_hash, n_validators, + core_index, }; construct_and_distribute_receipt( @@ -465,6 +495,7 @@ struct PreparedCollation { validation_data: PersistedValidationData, validation_code_hash: ValidationCodeHash, n_validators: usize, + core_index: CoreIndex, } /// Takes a prepared collation, along with its context, and produces a candidate receipt @@ -483,6 +514,7 @@ async fn construct_and_distribute_receipt( validation_data, validation_code_hash, n_validators, + core_index, } = collation; let persisted_validation_data_hash = validation_data.hash(); @@ -578,6 +610,7 @@ async fn construct_and_distribute_receipt( pov, parent_head_data, result_sender, + core_index, }) .await; } @@ -620,37 +653,3 @@ fn erasure_root( let chunks = polkadot_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; Ok(polkadot_erasure_coding::branches(&chunks).root()) } - -// Checks if the runtime supports `request_claim_queue` and executes it. Returns `Ok(None)` -// otherwise. Any [`RuntimeApiError`]s are bubbled up to the caller. -async fn fetch_claim_queue( - sender: &mut impl overseer::CollationGenerationSenderTrait, - relay_parent: Hash, -) -> crate::error::Result>>> { - if has_required_runtime( - sender, - relay_parent, - RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, - ) - .await - { - let res = request_claim_queue(relay_parent, sender).await.await??; - Ok(Some(res)) - } else { - gum::trace!(target: LOG_TARGET, "Runtime doesn't support `request_claim_queue`"); - Ok(None) - } -} - -// Returns the next scheduled `ParaId` for a core in the claim queue, wrapped in `ScheduledCore`. -// This function is supposed to be used in `handle_new_activations` hence the return type. -fn fetch_next_scheduled_on_core( - claim_queue: &BTreeMap>, - core_idx: CoreIndex, -) -> Option { - claim_queue - .get(&core_idx)? 
- .front() - .cloned() - .map(|para_id| ScheduledCore { para_id, collator: None }) -} diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index 9b16980e6af43e458a644697282aa74d52bc5ca2..1ec2cccfae71ddb195ba6bdad655e47b7ec5d119 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -30,13 +30,19 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle}; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - AsyncBackingParams, CollatorPair, HeadData, Id as ParaId, Id, PersistedValidationData, + async_backing::{BackingState, CandidatePendingAvailability}, + AsyncBackingParams, BlockNumber, CollatorPair, HeadData, PersistedValidationData, ScheduledCore, ValidationCode, }; use rstest::rstest; use sp_keyring::sr25519::Keyring as Sr25519Keyring; -use std::pin::Pin; -use test_helpers::{dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator}; +use std::{ + collections::{BTreeMap, VecDeque}, + pin::Pin, +}; +use test_helpers::{ + dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator, make_candidate, +}; type VirtualOverseer = TestSubsystemContextHandle; @@ -105,9 +111,9 @@ impl Future for TestCollator { impl Unpin for TestCollator {} -async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages { - const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000); +const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000); +async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages { overseer .recv() .timeout(TIMEOUT) @@ -135,6 +141,41 @@ fn scheduled_core_for>(para_id: Id) -> ScheduledCore { ScheduledCore { para_id: para_id.into(), collator: None } } +fn dummy_candidate_pending_availability( + para_id: ParaId, + candidate_relay_parent: Hash, + relay_parent_number: BlockNumber, +) -> CandidatePendingAvailability { + let (candidate, _pvd) = make_candidate( + candidate_relay_parent, + relay_parent_number, + para_id, + dummy_head_data(), + HeadData(vec![1]), + ValidationCode(vec![1, 2, 3]).hash(), + ); + let candidate_hash = candidate.hash(); + + CandidatePendingAvailability { + candidate_hash, + descriptor: candidate.descriptor, + commitments: candidate.commitments, + relay_parent_number, + max_pov_size: 5 * 1024 * 1024, + } +} + +fn dummy_backing_state(pending_availability: Vec) -> BackingState { + let constraints = helpers::dummy_constraints( + 0, + vec![0], + dummy_head_data(), + ValidationCodeHash::from(Hash::repeat_byte(42)), + ); + + BackingState { constraints, pending_availability } +} + #[rstest] #[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] #[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] @@ -176,6 +217,12 @@ fn requests_availability_per_relay_parent(#[case] runtime_version: u32) { ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { tx.send(Ok(BTreeMap::new())).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ParaBackingState(_para_id, tx), + ))) => { + tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); + }, Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg), } } @@ -273,6 +320,12 @@ fn requests_validation_data_for_scheduled_matches(#[case] runtime_version: u32) ))) if runtime_version >= 
RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { tx.send(Ok(BTreeMap::new())).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ParaBackingState(_para_id, tx), + ))) => { + tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); + }, Some(msg) => { panic!("didn't expect any other overseer requests; got {:?}", msg) }, @@ -384,6 +437,12 @@ fn sends_distribute_collation_message(#[case] runtime_version: u32) { ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { tx.send(Ok(BTreeMap::new())).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ParaBackingState(_para_id, tx), + ))) => { + tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); + }, Some(msg @ AllMessages::CollatorProtocol(_)) => { inner_to_collator_protocol.lock().await.push(msg); }, @@ -561,8 +620,13 @@ fn fallback_when_no_validation_code_hash_api(#[case] runtime_version: u32) { _hash, RuntimeApiRequest::ClaimQueue(tx), ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { - let res = BTreeMap::>::new(); - tx.send(Ok(res)).unwrap(); + tx.send(Ok(Default::default())).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ParaBackingState(_para_id, tx), + ))) => { + tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); }, Some(msg) => { panic!("didn't expect any other overseer requests; got {:?}", msg) @@ -611,6 +675,7 @@ fn submit_collation_is_no_op_before_initialization() { parent_head: vec![1, 2, 3].into(), validation_code_hash: Hash::repeat_byte(1).into(), result_sender: None, + core_index: CoreIndex(0), }), }) .await; @@ -623,7 +688,7 @@ fn submit_collation_is_no_op_before_initialization() { fn submit_collation_leads_to_distribution() { let relay_parent = Hash::repeat_byte(0); let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); - let parent_head = HeadData::from(vec![1, 2, 3]); + let parent_head = dummy_head_data(); let para_id = ParaId::from(5); let expected_pvd = PersistedValidationData { parent_head: parent_head.clone(), @@ -644,9 +709,10 @@ fn submit_collation_leads_to_distribution() { msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams { relay_parent, collation: test_collation(), - parent_head: vec![1, 2, 3].into(), + parent_head: dummy_head_data(), validation_code_hash, result_sender: None, + core_index: CoreIndex(0), }), }) .await; @@ -716,11 +782,14 @@ fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] run candidate_hash: Default::default(), candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), })]; - let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); + let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]).into(); test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + + let pending_availability = + vec![dummy_candidate_pending_availability(para_id, activated_hash, 1)]; helpers::handle_runtime_calls_on_new_head_activation( &mut virtual_overseer, activated_hash, @@ -730,12 +799,142 @@ fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] run claim_queue, ) .await; - helpers::handle_core_processing_for_a_leaf( + helpers::handle_cores_processing_for_a_leaf( &mut virtual_overseer, activated_hash, para_id, // 
`CoreState` is `Occupied` => `OccupiedCoreAssumption` is `Included` OccupiedCoreAssumption::Included, + 1, + pending_availability, + ) + .await; + + virtual_overseer + }); +} + +// There are a variable number of cores in `Occupied` state and async backing is enabled. +// On new head activation `CollationGeneration` should produce and distribute a new collation +// with the proper assumption about the para's candidate chain availability at the next block. +#[rstest] +#[case(0)] +#[case(1)] +#[case(2)] +fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elastic_scaling( + #[case] candidates_pending_avail: u32, +) { + let activated_hash: Hash = [1; 32].into(); + let para_id = ParaId::from(5); + + let cores = (0..3) + .into_iter() + .map(|idx| { + CoreState::Occupied(polkadot_primitives::OccupiedCore { + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), + occupied_since: 0, + time_out_at: 10, + next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }), + availability: Default::default(), // doesn't matter + group_responsible: polkadot_primitives::GroupIndex(idx as u32), + candidate_hash: Default::default(), + candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + }) + }) + .collect::>(); + + let pending_availability = (0..candidates_pending_avail) + .into_iter() + .map(|_idx| dummy_candidate_pending_availability(para_id, activated_hash, 0)) + .collect::>(); + + let claim_queue = cores + .iter() + .enumerate() + .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) + .collect::>(); + let total_cores = cores.len(); + + test_harness(|mut virtual_overseer| async move { + helpers::initialize_collator(&mut virtual_overseer, para_id).await; + helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( + &mut virtual_overseer, + activated_hash, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, + cores, + // Using latest runtime with the fancy claim queue exposed. + RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + claim_queue, + ) + .await; + + helpers::handle_cores_processing_for_a_leaf( + &mut virtual_overseer, + activated_hash, + para_id, + // if at least 1 core is occupied => `OccupiedCoreAssumption` is `Included`, + // else the assumption is `Free`. + if candidates_pending_avail > 0 { + OccupiedCoreAssumption::Included + } else { + OccupiedCoreAssumption::Free + }, + total_cores, + pending_availability, + ) + .await; + + virtual_overseer + }); +} + +// There are a variable number of cores in `Free` state and async backing is enabled. +// On new head activation `CollationGeneration` should produce and distribute a new collation +// with the proper assumption about the para's candidate chain availability at the next block.
+#[rstest] +#[case(0)] +#[case(1)] +#[case(2)] +fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_scaling( + #[case] total_cores: usize, +) { + let activated_hash: Hash = [1; 32].into(); + let para_id = ParaId::from(5); + + let cores = (0..total_cores) + .into_iter() + .map(|_idx| CoreState::Scheduled(ScheduledCore { para_id, collator: None })) + .collect::>(); + + let claim_queue = cores + .iter() + .enumerate() + .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) + .collect::>(); + + test_harness(|mut virtual_overseer| async move { + helpers::initialize_collator(&mut virtual_overseer, para_id).await; + helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( + &mut virtual_overseer, + activated_hash, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, + cores, + // Using latest runtime with the fancy claim queue exposed. + RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + claim_queue, + ) + .await; + + helpers::handle_cores_processing_for_a_leaf( + &mut virtual_overseer, + activated_hash, + para_id, + // `CoreState` is `Free` => `OccupiedCoreAssumption` is `Free` + OccupiedCoreAssumption::Free, + total_cores, + vec![], ) .await; @@ -765,11 +964,12 @@ fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( candidate_hash: Default::default(), candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), })]; - let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]); + let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]).into(); test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + helpers::handle_runtime_calls_on_new_head_activation( &mut virtual_overseer, activated_hash, @@ -785,8 +985,38 @@ fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( } mod helpers { + use polkadot_primitives::{ + async_backing::{Constraints, InboundHrmpLimitations}, + BlockNumber, + }; + use super::*; + // A set of dummy constraints for `ParaBackingState` + pub(crate) fn dummy_constraints(
requests performed in `handle_new_activations` for the case when a // collation should be prepared for the new leaf - pub async fn handle_core_processing_for_a_leaf( + pub async fn handle_cores_processing_for_a_leaf( virtual_overseer: &mut VirtualOverseer, activated_hash: Hash, para_id: ParaId, expected_occupied_core_assumption: OccupiedCoreAssumption, + cores_assigned: usize, + pending_availability: Vec, ) { + // Expect no messages if no cores are assigned to the para + if cores_assigned == 0 { + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + return + } + // Some hardcoded data - if needed, extract to parameters let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); - let parent_head = HeadData::from(vec![1, 2, 3]); + let parent_head = dummy_head_data(); let pvd = PersistedValidationData { parent_head: parent_head.clone(), relay_parent_number: 10, @@ -900,6 +1138,15 @@ mod helpers { max_pov_size: 1024, }; + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) + ) if parent == activated_hash && p_id == para_id => { + tx.send(Ok(Some(dummy_backing_state(pending_availability)))).unwrap(); + } + ); + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => {
+ }) => { + assert_eq!(parent_head_data_hash, parent_head.hash()); + assert_eq!(candidate_receipt.descriptor().persisted_validation_data_hash, pvd.hash()); + assert_eq!(candidate_receipt.descriptor().para_head, dummy_head_data().hash()); + assert_eq!(candidate_receipt.descriptor().validation_code_hash, validation_code_hash); + } + ); + } } } diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 2a5b6198b9a827788696bdf5e1eeeb5829a9da6d..ced7706c40a288759dd0f5336a8389c60c8e533a 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -10,7 +10,7 @@ description = "Approval Voting Subsystem of the Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } gum = { package = "tracing-gum", path = "../../gum" } @@ -41,7 +41,7 @@ rand_chacha = { version = "0.3.1" } rand = "0.8.5" [dev-dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" parking_lot = "0.12.1" sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } @@ -52,4 +52,4 @@ assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs index b979cb7ef45f6bf6af02acd8fde1d04e6bee8206..b0966ad01f7bff8242e3255968b3e85b4d0239ca 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -254,7 +254,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index df6e4754dbd63ba81e736495297bfaf017a4e3dc..1081d79884f7594be12ebe6cee3047cb2b9feebe 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -79,7 +79,7 @@ pub fn v1_to_latest(db: Arc, config: Config) -> Result<()> { block.candidates().iter().enumerate() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation. if let Some(candidate_entry) = backend .load_candidate_entry_v1(&candidate_hash, candidate_index as CandidateIndex) .map_err(|e| Error::InternalError(e))? diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs index 6021b44c2765ff12a03e1ad85bfb8117c9fcfb03..5fa915add4166fa1f042dadaae063aaa83941b92 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -269,7 +269,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. 
Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs index ad5e89ef3de84035ff3e9c79533edfc07bf8d4c5..d1e7ee08225bf96ceffab8842972cf53ed5725da 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs @@ -60,7 +60,7 @@ pub fn v2_to_latest(db: Arc, config: Config) -> Result<()> { block.candidates().iter().enumerate() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation. if let Some(candidate_entry) = backend .load_candidate_entry_v2(&candidate_hash, candidate_index as CandidateIndex) .map_err(|e| Error::InternalError(e))? @@ -104,7 +104,7 @@ pub fn v1_to_latest_sanity_check( for block in all_blocks { for (_core_index, candidate_hash) in block.candidates() { // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. + // return that representation. if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { candidates.insert(candidate_entry.candidate.hash()); } diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs index 08c65461bca80aafb758e42c80b509bc37c47ece..7c0cf9d4f7daaccd1887a03a2a06e9bb4e6b587e 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs @@ -264,7 +264,7 @@ fn canonicalize_works() { // -> B1 -> C1 -> D1 // A -> B2 -> C2 -> D2 // - // We'll canonicalize C1. Everytning except D1 should disappear. + // We'll canonicalize C1. Everything except D1 should disappear. // // Candidates: // Cand1 in B2 diff --git a/polkadot/node/core/approval-voting/src/criteria.rs b/polkadot/node/core/approval-voting/src/criteria.rs index 1ebea2641b62198c291951448c8c552d9cfb34cf..57c0ac272dc5a5cad8b9c69831faa5ecfc778033 100644 --- a/polkadot/node/core/approval-voting/src/criteria.rs +++ b/polkadot/node/core/approval-voting/src/criteria.rs @@ -148,7 +148,7 @@ fn relay_vrf_modulo_cores( generate_samples(rand_chacha, num_samples as usize, max_cores as usize) } -/// Generates `num_sumples` randomly from (0..max_cores) range +/// Generates `num_samples` randomly from (0..max_cores) range /// /// Note! The algorithm can't change because validators on the other /// side won't be able to check the assignments until they update. 
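A short aside on the `generate_samples` doc fix above: the sampling it documents draws `num_samples` core indices from the `0..max_cores` range deterministically from VRF material, which is exactly why the note below it says the algorithm can't change until all validators can verify the new form. The following is a minimal sketch of that idea only, not the actual `relay_vrf_modulo_cores` code: the helper name and the use of `rand::seq::index::sample` are illustrative assumptions, while `rand_chacha` matches the dependency already listed in the approval-voting `Cargo.toml` above.

```rust
use rand::{seq::index::sample, SeedableRng};
use rand_chacha::ChaCha20Rng;

/// Illustrative only: draw `num_samples` distinct core indices from `0..max_cores`,
/// derived deterministically from a 32-byte seed (VRF output in the real subsystem),
/// so that any peer holding the same seed can reproduce and verify the selection.
fn sample_core_indices(seed: [u8; 32], num_samples: usize, max_cores: usize) -> Vec<u32> {
    let mut rng = ChaCha20Rng::from_seed(seed);
    // `sample` panics if asked for more items than the range holds, so clamp first.
    sample(&mut rng, max_cores, num_samples.min(max_cores))
        .into_vec()
        .into_iter()
        .map(|core| core as u32)
        .collect()
}
```

Determinism from the shared seed is the property the comment protects: a validator that unilaterally changed the sampling scheme would produce assignments its peers could no longer check.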
diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index 7a56e9fd11293d1a96debd5e98b719a19c48045f..f4be42a4845080ce90fb87ddc838e2c1ab792ed1 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -45,8 +45,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{determine_new_blocks, runtime::RuntimeInfo}; use polkadot_primitives::{ - vstaging::node_features, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, - ConsensusLog, CoreIndex, GroupIndex, Hash, Header, SessionIndex, + node_features, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ConsensusLog, + CoreIndex, GroupIndex, Hash, Header, SessionIndex, }; use sc_keystore::LocalKeystore; use sp_consensus_slots::Slot; @@ -91,7 +91,7 @@ enum ImportedBlockInfoError { #[error(transparent)] RuntimeError(RuntimeApiError), - #[error("future cancalled while requesting {0}")] + #[error("future cancelled while requesting {0}")] FutureCancelled(&'static str, futures::channel::oneshot::Canceled), #[error(transparent)] @@ -619,8 +619,8 @@ pub(crate) mod tests { use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, NodeFeatures}, - ExecutorParams, Id as ParaId, IndexedVec, SessionInfo, ValidatorId, ValidatorIndex, + node_features::FeatureIndex, ExecutorParams, Id as ParaId, IndexedVec, NodeFeatures, + SessionInfo, ValidatorId, ValidatorIndex, }; pub(crate) use sp_consensus_babe::{ digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest}, diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 1d7ab3eee21741befd72bfea591af6d2bfa7c4bf..57e9af4a518a5728424ad5c3dc50b2a46dc5aa07 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -54,10 +54,10 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - vstaging::{ApprovalVoteMultipleCandidates, ApprovalVotingParams}, - BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, DisputeStatement, - ExecutorParams, GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + ApprovalVoteMultipleCandidates, ApprovalVotingParams, BlockNumber, CandidateHash, + CandidateIndex, CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, + Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId, + ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -1285,10 +1285,10 @@ fn cores_to_candidate_indices( // Map from core index to candidate index. for claimed_core_index in core_indices.iter_ones() { - // Candidates are sorted by core index. 
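+ // Candidates are no longer guaranteed to be ordered by core index here (see the + // `persisted_entries` comment change below), so a linear scan replaces the old binary search.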
- if let Ok(candidate_index) = block_entry + if let Some(candidate_index) = block_entry .candidates() - .binary_search_by_key(&(claimed_core_index as u32), |(core_index, _)| core_index.0) + .iter() + .position(|(core_index, _)| core_index.0 == claimed_core_index as u32) { candidate_indices.push(candidate_index as _); } @@ -1850,7 +1850,7 @@ async fn get_approval_signatures_for_candidate( gum::trace!( target: LOG_TARGET, ?candidate_hash, - "Spawning task for fetching sinatures from approval-distribution" + "Spawning task for fetching signatures from approval-distribution" ); ctx.spawn("get-approval-signatures", Box::pin(get_approvals)) } diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index b924a1b52ccf49a242adecfd534b498004ec3d87..6eeb99cb99ffa017492dba93e72556eab9104c8e 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -454,7 +454,7 @@ pub struct BlockEntry { slot: Slot, relay_vrf_story: RelayVRFStory, // The candidates included as-of this block and the index of the core they are - // leaving. Sorted ascending by core index. + // leaving. candidates: Vec<(CoreIndex, CandidateHash)>, // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. // The i'th bit is `true` iff the candidate has been approved in the context of this diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index c9053232a4c8752f03134047dbc64b020377740a..f7bbbca4b8a1c0c5609898d714ad045dfd33f3ef 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -37,8 +37,8 @@ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_overseer::HeadSupportsParachains; use polkadot_primitives::{ - vstaging::NodeFeatures, ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, - GroupIndex, Header, Id as ParaId, IndexedVec, ValidationCode, ValidatorSignature, + ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, GroupIndex, Header, + Id as ParaId, IndexedVec, NodeFeatures, ValidationCode, ValidatorSignature, }; use std::time::Duration; @@ -2479,6 +2479,173 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { }); } +// See https://github.com/paritytech/polkadot-sdk/issues/3826 +#[test] +fn inclusion_events_can_be_unordered_by_core_index() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + for core in 0..3 { + let _ = assignments.insert( + CoreIndex(core), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2( + AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }, + ), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + } + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { + mut virtual_overseer, + clock, + sync_oracle_handle: _sync_oracle_handle, + .. 
+ } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_receipt0 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(0_u32); + receipt + }; + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt + }; + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt + }; + let candidate_index0 = 0; + let candidate_index1 = 1; + let candidate_index2 = 2; + + let validator0 = ValidatorIndex(0); + let validator1 = ValidatorIndex(1); + let validator2 = ValidatorIndex(2); + let validator3 = ValidatorIndex(3); + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::<GroupIndex, Vec<ValidatorIndex>>::from(vec![ + vec![validator0, validator1], + vec![validator2], + vec![validator3], + ]), + needed_approvals: 1, + zeroth_delay_tranche_width: 1, + relay_vrf_modulo_samples: 1, + n_delay_tranches: 1, + no_show_slots: 1, + ..session_info(&validators) + }; + + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(0), + candidates: Some(vec![ + (candidate_receipt0.clone(), CoreIndex(2), GroupIndex(2)), + (candidate_receipt1.clone(), CoreIndex(1), GroupIndex(0)), + (candidate_receipt2.clone(), CoreIndex(0), GroupIndex(1)), + ]), + session_info: Some(session_info), + end_syncing: true, + }, + ) + .build(&mut virtual_overseer) + .await; + + assert_eq!(clock.inner.lock().next_wakeup().unwrap(), 2); + clock.inner.lock().wakeup_all(100); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Assignment is distributed only once from `approval-voting` + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(c_indices, vec![candidate_index0, candidate_index1, candidate_index2].try_into().unwrap()); + } + ); + + // Candidate 0 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Candidate 1 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Candidate 2 + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + // Check if assignment was triggered for candidate 0. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt0.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + // Check if assignment was triggered for candidate 1. + let candidate_entry = + store.load_candidate_entry(&candidate_receipt1.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + // Check if assignment was triggered for candidate 2.
+ let candidate_entry = + store.load_candidate_entry(&candidate_receipt2.hash()).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + virtual_overseer + }); +} + fn approved_ancestor_test( skip_approval: impl Fn(BlockNumber) -> bool, approved_height: BlockNumber, @@ -3154,7 +3321,7 @@ where // starting configuration. The relevant ticks (all scheduled wakeups) are printed after no further // ticks are scheduled. To create a valid test, a prefix of the relevant ticks should be included // in the final test configuration, ending at the tick with the desired inputs to -// should_trigger_assignemnt. +// should_trigger_assignment. async fn step_until_done(clock: &MockClock) { let mut relevant_ticks = Vec::new(); loop { @@ -3837,7 +4004,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { async fn handle_approval_on_max_coalesce_count( virtual_overseer: &mut VirtualOverseer, - candidate_indicies: Vec<CandidateIndex>, + candidate_indices: Vec<CandidateIndex>, ) { assert_matches!( overseer_recv(virtual_overseer).await, @@ -3845,16 +4012,16 @@ async fn handle_approval_on_max_coalesce_count( _, c_indices, )) => { - assert_eq!(TryInto::<CandidateBitfield>::try_into(candidate_indicies.clone()).unwrap(), c_indices); + assert_eq!(TryInto::<CandidateBitfield>::try_into(candidate_indices.clone()).unwrap(), c_indices); } ); - for _ in &candidate_indicies { + for _ in &candidate_indices { recover_available_data(virtual_overseer).await; fetch_validation_code(virtual_overseer).await; } - for _ in &candidate_indicies { + for _ in &candidate_indices { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { @@ -3885,7 +4052,7 @@ async fn handle_approval_on_max_coalesce_count( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { - assert_eq!(TryInto::<CandidateBitfield>::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + assert_eq!(TryInto::<CandidateBitfield>::try_into(candidate_indices).unwrap(), vote.candidate_indices); } ); @@ -3895,7 +4062,7 @@ async fn handle_approval_on_max_wait_time( virtual_overseer: &mut VirtualOverseer, - candidate_indicies: Vec<CandidateIndex>, + candidate_indices: Vec<CandidateIndex>, clock: Box<MockClock>, ) { const TICK_NOW_BEGIN: u64 = 1; @@ -3909,16 +4076,16 @@ async fn handle_approval_on_max_wait_time( _, c_indices, )) => { - assert_eq!(TryInto::<CandidateBitfield>::try_into(candidate_indicies.clone()).unwrap(), c_indices); + assert_eq!(TryInto::<CandidateBitfield>::try_into(candidate_indices.clone()).unwrap(), c_indices); } ); - for _ in &candidate_indicies { + for _ in &candidate_indices { recover_available_data(virtual_overseer).await; fetch_validation_code(virtual_overseer).await; } - for _ in &candidate_indicies { + for _ in &candidate_indices { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { @@ -3978,7 +4145,7 @@ async fn handle_approval_on_max_wait_time( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { - assert_eq!(TryInto::<CandidateBitfield>::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + assert_eq!(TryInto::<CandidateBitfield>::try_into(candidate_indices).unwrap(), 
vote.candidate_indices); } ); @@ -4319,7 +4486,7 @@ fn subsystem_relaunches_approval_work_on_restart() { virtual_overseer }); - // Restart a new approval voting subsystem with the same database and major syncing true untill + // Restart a new approval voting subsystem with the same database and major syncing true until // the last leaf. let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); diff --git a/polkadot/node/core/approval-voting/src/time.rs b/polkadot/node/core/approval-voting/src/time.rs index 99dfbe07678f2f0956565e92ffaf196b6f482af9..5c3e7e85a17a212eeae2e8b6b9b7cf6dbecde036 100644 --- a/polkadot/node/core/approval-voting/src/time.rs +++ b/polkadot/node/core/approval-voting/src/time.rs @@ -126,13 +126,13 @@ impl DelayedApprovalTimer { /// no additional timer is started. pub(crate) fn maybe_arm_timer( &mut self, - wait_untill: Tick, + wait_until: Tick, clock: &dyn Clock, block_hash: Hash, validator_index: ValidatorIndex, ) { if self.blocks.insert(block_hash) { - let clock_wait = clock.wait(wait_untill); + let clock_wait = clock.wait(wait_until); self.timers.push(Box::pin(async move { clock_wait.await; (block_hash, validator_index) diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 05212da7479848830a8fb70ee4c38d449e9a6a7e..bc9b979228a1db8c4adfc01bd2d60470721bfcac 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" kvdb = "0.13.0" thiserror = { workspace = true } @@ -29,7 +29,7 @@ polkadot-node-jaeger = { path = "../../jaeger" } [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.4.0" kvdb-memorydb = "0.13.0" diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs index ef7dcecac0755084efbba839d12b532ff0f336cb..68db4686a9740bb052f7d05031d148cd5b05e0a2 100644 --- a/polkadot/node/core/av-store/src/lib.rs +++ b/polkadot/node/core/av-store/src/lib.rs @@ -1218,7 +1218,7 @@ fn process_message( // tx channel is dropped and that error is caught by the caller subsystem. // // We bubble up the specific error here so `av-store` logs still tell what - // happend. + // happened. return Err(e.into()) }, } diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index d0c1f9aa4832dfe629e1fb20d3082f27c260a7d5..26fa54470fbda6c1e5a22999a34022173b003fe6 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -10,7 +10,7 @@ description = "The Candidate Backing Subsystem. 
Tracks parachain candidates that workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } @@ -30,7 +30,7 @@ sp-application-crypto = { path = "../../../../substrate/primitives/application-c sp-keyring = { path = "../../../../substrate/primitives/keyring" } sc-keystore = { path = "../../../../substrate/client/keystore" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } assert_matches = "1.4.0" rstest = "0.18.2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 64955a393962076bd7b202dd31a7d69d46a4d1b9..52684f3fe3063e95dcab48947cf86ca62c321372 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. +use std::collections::HashMap; + use fatality::Nested; use futures::channel::{mpsc, oneshot}; @@ -24,7 +26,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{runtime, Error as UtilError}; use polkadot_primitives::{BackedCandidate, ValidationCodeHash}; -use crate::LOG_TARGET; +use crate::{ParaId, LOG_TARGET}; pub type Result<T> = std::result::Result<T, Error>; pub type FatalResult<T> = std::result::Result<T, FatalError>; @@ -55,7 +57,7 @@ pub enum Error { InvalidSignature, #[error("Failed to send candidates {0:?}")] - Send(Vec<BackedCandidate>), + Send(HashMap<ParaId, Vec<BackedCandidate>>), #[error("FetchPoV failed")] FetchPoV, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 69bf2e956a0f6bb7f3b9f4a19446e5e7d2c125bb..23acb0450944e8eedea437da545830174ed84449 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -105,12 +105,11 @@ use polkadot_node_subsystem_util::{ Validator, }; use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, NodeFeatures}, - BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, - Hash, Id as ParaId, IndexedVec, PersistedValidationData, PvfExecKind, SessionIndex, - SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, - ValidityAttestation, + node_features::FeatureIndex, BackedCandidate, CandidateCommitments, CandidateHash, + CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, PersistedValidationData, + PvfExecKind, SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, + ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ @@ -292,7 +291,7 @@ struct State { /// Cache the per-session Validator->Group mapping. validator_to_group_cache: LruMap<SessionIndex, Arc<IndexedVec<ValidatorIndex, Option<GroupIndex>>>>, - /// A cloneable sender which is dispatched to background candidate validation tasks to inform + /// A clonable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, /// The handle to the keystore used for signing.
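The hunks below rework `handle_get_backed_candidates_message` around the new per-para request shape: within one para the requested candidates form a chain, so the first one that cannot be attested invalidates everything after it (hence `break`), while other paras are unaffected. A compact sketch of that rule, using hypothetical stand-in types (`u32` for `ParaId`, `u64` for `CandidateHash`), not the subsystem's real API:

use std::collections::HashMap;

fn get_backed_sketch<B>(
    requested: HashMap<u32, Vec<u64>>,    // para id -> ordered candidate hashes
    attested: impl Fn(u64) -> Option<B>,  // stands in for the statement-table lookup
) -> HashMap<u32, Vec<B>> {
    let mut backed = HashMap::new();
    for (para, hashes) in requested {
        let mut chain = Vec::with_capacity(hashes.len());
        for hash in hashes {
            match attested(hash) {
                Some(candidate) => chain.push(candidate),
                None => break, // a hole invalidates the rest of this para's chain
            }
        }
        if !chain.is_empty() {
            backed.insert(para, chain);
        }
    }
    backed
}

fn main() {
    let requested = HashMap::from([(1, vec![10, 11, 12]), (2, vec![20])]);
    // Candidate 11 cannot be attested: para 1 keeps only the prefix [10].
    let backed = get_backed_sketch(requested, |h| (h != 11).then_some(h));
    assert_eq!(backed[&1], vec![10]);
    assert_eq!(backed[&2], vec![20]);
}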
@@ -911,7 +910,7 @@ async fn handle_active_leaves_update( } let mut seconded_at_depth = HashMap::new(); - if let Some(response) = membership_answers.next().await { + while let Some(response) = membership_answers.next().await { match response { Err(oneshot::Canceled) => { gum::warn!( @@ -2231,15 +2230,16 @@ async fn handle_statement_message( fn handle_get_backed_candidates_message( state: &State, - requested_candidates: Vec<(CandidateHash, Hash)>, - tx: oneshot::Sender<Vec<BackedCandidate>>, + requested_candidates: HashMap<ParaId, Vec<(CandidateHash, Hash)>>, + tx: oneshot::Sender<HashMap<ParaId, Vec<BackedCandidate>>>, metrics: &Metrics, ) -> Result<(), Error> { let _timer = metrics.time_get_backed_candidates(); - let backed = requested_candidates - .into_iter() - .filter_map(|(candidate_hash, relay_parent)| { + let mut backed = HashMap::with_capacity(requested_candidates.len()); + + for (para_id, para_candidates) in requested_candidates { + for (candidate_hash, relay_parent) in para_candidates.iter() { let rp_state = match state.per_relay_parent.get(&relay_parent) { Some(rp_state) => rp_state, None => { @@ -2249,13 +2249,13 @@ fn handle_get_backed_candidates_message( ?candidate_hash, "Requested candidate's relay parent is out of view", ); - return None + break }, }; - rp_state + let maybe_backed_candidate = rp_state .table .attested_candidate( - &candidate_hash, + candidate_hash, &rp_state.table_context, rp_state.minimum_backing_votes, ) @@ -2265,9 +2265,18 @@ fn handle_get_backed_candidates_message( &rp_state.table_context, rp_state.inject_core_index, ) - }) - }) - .collect(); + }); + + if let Some(backed_candidate) = maybe_backed_candidate { + backed + .entry(para_id) + .or_insert_with(|| Vec::with_capacity(para_candidates.len())) + .push(backed_candidate); + } else { + break + } + } + } tx.send(backed).map_err(|data| Error::Send(data))?; Ok(()) diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index e3cc5727435a3fef3532a9de342cd194ecd4fe3a..d1969e656db673b70b070c9b708bab867bec9fcb 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,8 +33,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::node_features, CandidateDescriptor, GroupRotationInfo, HeadData, - PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, + node_features, CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, + PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use rstest::rstest; use sp_application_crypto::AppCrypto; @@ -663,13 +663,19 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate_a_hash, test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate_a_hash, test_state.relay_parent)], + )) + .collect(), tx, ); virtual_overseer.send(FromOrchestra::Communication { msg }).await; - let candidates = rx.await.unwrap(); + let mut candidates = rx.await.unwrap(); + assert_eq!(1, candidates.len()); + let candidates = candidates.remove(&test_state.chain_ids[0]).unwrap(); assert_eq!(1, candidates.len()); assert_eq!(candidates[0].validity_votes().len(), 3); @@ -695,6 +701,323 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { }); } +#[test] +fn get_backed_candidate_preserves_order() { + let mut test_state = TestState::default(); + test_state + .node_features
.resize((node_features::FeatureIndex::ElasticScalingMVP as u8 + 1) as usize, false); + test_state + .node_features + .set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); + + // Set a single validator as the first validator group. It simplifies the test. + test_state.validator_groups.0[0] = vec![ValidatorIndex(2)]; + // Add another validator group for the third core. + test_state.validator_groups.0.push(vec![ValidatorIndex(3)]); + // Assign the second core to the same para as the first one. + test_state.availability_cores[1] = + CoreState::Scheduled(ScheduledCore { para_id: test_state.chain_ids[0], collator: None }); + // Add another availability core for paraid 2. + test_state.availability_cores.push(CoreState::Scheduled(ScheduledCore { + para_id: test_state.chain_ids[1], + collator: None, + })); + + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + test_startup(&mut virtual_overseer, &test_state).await; + + let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pov_b = PoV { block_data: BlockData(vec![3, 4, 5]) }; + let pov_c = PoV { block_data: BlockData(vec![5, 6, 7]) }; + let validation_code_ab = ValidationCode(vec![1, 2, 3]); + let validation_code_c = ValidationCode(vec![4, 5, 6]); + + let parent_head_data_a = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + let parent_head_data_b = { + let mut head = parent_head_data_a.clone(); + head.0[0] = 98; + head + }; + let output_head_data_b = { + let mut head = parent_head_data_a.clone(); + head.0[0] = 99; + head + }; + let parent_head_data_c = test_state.head_data.get(&test_state.chain_ids[1]).unwrap(); + let output_head_data_c = { + let mut head = parent_head_data_c.clone(); + head.0[0] = 97; + head + }; + + let pvd_a = PersistedValidationData { + parent_head: parent_head_data_a.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + let pvd_b = PersistedValidationData { + parent_head: parent_head_data_b.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + let pvd_c = PersistedValidationData { + parent_head: parent_head_data_c.clone(), + relay_parent_number: 0_u32.into(), + max_pov_size: 1024, + relay_parent_storage_root: dummy_hash(), + }; + + let candidate_a = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_a.hash(), + head_data: parent_head_data_b.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + validation_code: validation_code_ab.0.clone(), + persisted_validation_data_hash: pvd_a.hash(), + } + .build(); + let candidate_b = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_b.hash(), + head_data: output_head_data_b.clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), + validation_code: validation_code_ab.0.clone(), + persisted_validation_data_hash: pvd_b.hash(), + } + .build(); + let candidate_c = TestCandidateBuilder { + para_id: test_state.chain_ids[1], + relay_parent: test_state.relay_parent, + pov_hash: pov_c.hash(), + head_data: output_head_data_c.clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_c.clone()), + validation_code: validation_code_c.0.clone(), + persisted_validation_data_hash: pvd_c.hash(), + } + .build(); + let candidate_a_hash = candidate_a.hash(); + let candidate_b_hash = candidate_b.hash(); 
+ let candidate_c_hash = candidate_c.hash(); + + // Back a chain of two candidates for the first paraid. Back one candidate for the second + // paraid. + for (candidate, pvd, validator_index) in [ + (candidate_a, pvd_a, ValidatorIndex(2)), + (candidate_b, pvd_b, ValidatorIndex(1)), + (candidate_c, pvd_c, ValidatorIndex(3)), + ] { + let public = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[validator_index.0 as usize].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + &test_state.signing_context, + validator_index, + &public.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let statement = + CandidateBackingMessage::Statement(test_state.relay_parent, signed.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate.to_plain()); + } + ); + } + + // Happy case, all candidates should be present. + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + ( + test_state.chain_ids[0], + vec![ + (candidate_a_hash, test_state.relay_parent), + (candidate_b_hash, test_state.relay_parent), + ], + ), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(2, candidates.len()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[0]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_a_hash, candidate_b_hash] + ); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_c_hash] + ); + + // The first candidate of the first para is invalid (we supply the wrong relay parent or a + // wrong candidate hash). No candidates should be returned for paraid 1. ParaId 2 should be + // fine. + for candidates in [ + vec![ + (candidate_a_hash, Hash::repeat_byte(9)), + (candidate_b_hash, test_state.relay_parent), + ], + vec![ + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + (candidate_b_hash, test_state.relay_parent), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + + assert!(candidates.remove(&test_state.chain_ids[0]).is_none()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_c_hash] + ); + } + + // The second candidate of the first para is invalid (we supply the wrong relay parent or a + // wrong candidate hash). The first candidate of the first para should still be present. + // ParaId 2 is fine.
+ for candidates in [ + vec![ + (candidate_a_hash, test_state.relay_parent), + (candidate_b_hash, Hash::repeat_byte(9)), + ], + vec![ + (candidate_a_hash, test_state.relay_parent), + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(2, candidates.len()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[0]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_a_hash] + ); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_c_hash] + ); + } + + // Both candidates of para id 1 are invalid (we supply the wrong relay parent or a wrong + // candidate hash). No candidates should be returned for para id 1. Para Id 2 is fine. + for candidates in [ + vec![ + (CandidateHash(Hash::repeat_byte(9)), test_state.relay_parent), + (CandidateHash(Hash::repeat_byte(10)), test_state.relay_parent), + ], + vec![ + (candidate_a_hash, Hash::repeat_byte(9)), + (candidate_b_hash, Hash::repeat_byte(10)), + ], + ] { + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + [ + (test_state.chain_ids[0], candidates), + (test_state.chain_ids[1], vec![(candidate_c_hash, test_state.relay_parent)]), + ] + .into_iter() + .collect(), + tx, + ); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + + assert!(candidates.remove(&test_state.chain_ids[0]).is_none()); + assert_eq!( + candidates + .remove(&test_state.chain_ids[1]) + .unwrap() + .iter() + .map(|c| c.hash()) + .collect::<Vec<_>>(), + vec![candidate_c_hash] + ); + } + + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(test_state.relay_parent), + ))) + .await; + virtual_overseer + }); +} + #[test] fn extract_core_index_from_statement_works() { let test_state = TestState::default(); @@ -950,13 +1273,19 @@ fn backing_works_while_validation_ongoing() { let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate_a.hash(), test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate_a.hash(), test_state.relay_parent)], + )) + .collect(), tx, ); virtual_overseer.send(FromOrchestra::Communication { msg }).await; - let candidates = rx.await.unwrap(); + let mut candidates = rx.await.unwrap(); + assert_eq!(candidates.len(), 1); + let candidates = candidates.remove(&test_state.chain_ids[0]).unwrap(); assert_eq!(1, candidates.len()); assert_eq!(candidates[0].validity_votes().len(), 3); @@ -1565,7 +1894,11 @@ fn backing_works_after_failed_validation() { // and check that it is still alive.
let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - vec![(candidate.hash(), test_state.relay_parent)], + std::iter::once(( + test_state.chain_ids[0], + vec![(candidate.hash(), test_state.relay_parent)], + )) + .collect(), tx, ); diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 6ecfffd7249b9213cc4ffb86f84a38e1cdb9babf..0663e0f1b699bbf44e0ecd51dcf646bc27966a2c 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -10,7 +10,7 @@ description = "Bitfield signing subsystem for the Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index 15fc8c940d338c9aab1eb4d23e98427a597a2df1..0cf4707aad29b3931cab7d65005799cf58c8c38e 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -10,8 +10,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } @@ -31,7 +31,7 @@ polkadot-node-core-pvf = { path = "../pvf" } [dev-dependencies] sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } assert_matches = "1.4.0" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 8237137fdca015ace87248d062431230a2ad47d4..ec24434db24c30713e106b99b3238f997ba4765f 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -617,7 +617,7 @@ async fn validate_candidate_exhaustive( Err(e) => { gum::info!(target: LOG_TARGET, ?para_id, err=?e, "Invalid candidate (validation code)"); - // Code already passed pre-checking, if decompression fails now this most likley means + // Code already passed pre-checking, if decompression fails now this most likely means // some local corruption happened. 
return Err(ValidationFailed("Code decompression failed".to_string())) }, diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index 9aa017ecba3d27c8f85c8055a1fc784d38efbfbd..f4d02d3f47b23ac4e71fa44c790bf4b0d1538902 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -10,7 +10,7 @@ description = "The Chain API subsystem provides access to chain related utility workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-subsystem = { path = "../../subsystem" } @@ -19,7 +19,7 @@ sc-client-api = { path = "../../../../substrate/client/api" } sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } [dev-dependencies] -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } maplit = "1.0.2" parity-scale-codec = "3.6.1" polkadot-node-primitives = { path = "../../primitives" } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 96fd42785cdd84ee463146f095b06ea01cc7b491..318f27a43086e4c6bee0bf0a0b18a78c9cf525be 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/chain-selection/src/lib.rs b/polkadot/node/core/chain-selection/src/lib.rs index aa5bb9548ad2484b3e0bdc0499b250335ea57f35..6f864fefb6110184233d52620dea923f8f0b1e4a 100644 --- a/polkadot/node/core/chain-selection/src/lib.rs +++ b/polkadot/node/core/chain-selection/src/lib.rs @@ -51,7 +51,7 @@ type Timestamp = u64; // If a block isn't approved in 120 seconds, nodes will abandon it // and begin building on another chain. const STAGNANT_TIMEOUT: Timestamp = 120; -// Delay prunning of the stagnant keys in prune only mode by 25 hours to avoid interception with the +// Delay pruning of the stagnant keys in prune only mode by 25 hours to avoid interception with the // finality const STAGNANT_PRUNE_DELAY: Timestamp = 25 * 60 * 60; // Maximum number of stagnant entries cleaned during one `STAGNANT_TIMEOUT` iteration @@ -237,7 +237,7 @@ impl Clock for SystemClock { // // The exact time that a block becomes stagnant in the local node is always expected // to differ from other nodes due to network asynchrony and delays in block propagation. - // Non-monotonicity exarcerbates that somewhat, but not meaningfully. + // Non-monotonicity exacerbates that somewhat, but not meaningfully. match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(d) => d.as_secs(), diff --git a/polkadot/node/core/chain-selection/src/tests.rs b/polkadot/node/core/chain-selection/src/tests.rs index cf021c0efeb0638b5fe1db4ef5dbe9dd4c7766f6..bc998f268a0da6a4ca3703f929be3e1c19677b35 100644 --- a/polkadot/node/core/chain-selection/src/tests.rs +++ b/polkadot/node/core/chain-selection/src/tests.rs @@ -406,7 +406,7 @@ async fn import_chains_into_empty( // some pre-blocks may need to be supplied to answer ancestry requests // that gather batches beyond the beginning of the new chain. 
// pre-blocks are those already known by the subsystem, however, -// the subsystem has no way of knowin that until requesting ancestry. +// the subsystem has no way of knowing that until requesting ancestry. async fn import_all_blocks_into( virtual_overseer: &mut VirtualOverseer, backend: &TestBackend, @@ -1300,7 +1300,7 @@ fn finalize_erases_unviable_from_one_but_not_all_reverts() { // F <- A1 <- A2 <- A3 // // A3 reverts A2 and A1. - // Finalize A1. A2 is stil unviable. + // Finalize A1. A2 is still unviable. let (a3_hash, chain_a) = construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| { diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 1fff0a77170669753fa3fd8d852f936446fe7264..cd3238449bea9d19ec65f418237282523ff3448a 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "3.6.1" kvdb = "0.13.0" diff --git a/polkadot/node/core/dispute-coordinator/src/db/v1.rs b/polkadot/node/core/dispute-coordinator/src/db/v1.rs index f0f17d2325d68bb1d70f40a2a761bdf5f35ebcfb..4950765cf510c047b811f50d0459c0add4c75d78 100644 --- a/polkadot/node/core/dispute-coordinator/src/db/v1.rs +++ b/polkadot/node/core/dispute-coordinator/src/db/v1.rs @@ -341,7 +341,7 @@ pub(crate) fn note_earliest_session( let lower_bound = (new_earliest_session, CandidateHash(Hash::repeat_byte(0x00))); let new_recent_disputes = recent_disputes.split_off(&lower_bound); - // Any remanining disputes are considered ancient and must be pruned. + // Any remaining disputes are considered ancient and must be pruned. let pruned_disputes = recent_disputes; if pruned_disputes.len() != 0 { diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index 4b511e7430af655303a7bb6e5ca0e86d0f8e8c7b..daa384b36ffbaf2c8d3c004e35eb537a3c33e4c0 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -462,7 +462,7 @@ async fn wait_for_first_leaf(ctx: &mut Context) -> Result { // Rare case where same candidate was present on multiple heights, but all are // pruned at the same time. This candidate was already pruned in the previous - // occurence so it is skipped now. + // occurrence so it is skipped now. }, Entry::Occupied(mut e) => { let mut blocks_including = std::mem::take(e.get_mut()); diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs index 748f9a16f493e2bce191091638771aa5be57fa9f..726dda596d7b8be26da55358c95c9080ec89d962 100644 --- a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs @@ -542,8 +542,8 @@ fn scraper_handles_backed_but_not_included_candidate() { } #[test] -fn scraper_handles_the_same_candidate_incuded_in_two_different_block_heights() { - // Same candidate will be inclued in these two leaves +fn scraper_handles_the_same_candidate_included_in_two_different_block_heights() { + // Same candidate will be included in these two leaves let test_targets = vec![2, 3]; // How many blocks should we skip before sending a leaf update. 
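In the `note_earliest_session` hunk above, the pruning relies on `BTreeMap::split_off`: it returns everything at or after the given key and leaves the remainder, the "ancient" disputes, behind. A small demonstration of those semantics, with simplified (u32, u8) keys standing in for (SessionIndex, CandidateHash):

use std::collections::BTreeMap;

fn main() {
    let mut recent_disputes = BTreeMap::from([
        ((1u32, 9u8), "ancient"),
        ((2, 3), "ancient"),
        ((3, 0), "kept"),
        ((4, 7), "kept"),
    ]);

    // The zero "hash" makes the bound include every candidate of the session.
    let new_earliest_session = 3u32;
    let new_recent_disputes = recent_disputes.split_off(&(new_earliest_session, 0));

    // What is left behind is exactly the set to prune.
    assert_eq!(recent_disputes.values().filter(|v| **v == "ancient").count(), 2);
    assert!(new_recent_disputes.keys().all(|(session, _)| *session >= new_earliest_session));
}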
diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 0360e357bee4ca70fe9a6ab3a9454d611ddc93fd..13cf2df88223ead28dcd98aeaa3c36db13444991 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -61,9 +61,9 @@ use polkadot_node_subsystem_test_helpers::{ make_buffered_subsystem_context, mock::new_leaf, TestSubsystemContextHandle, }; use polkadot_primitives::{ - vstaging::NodeFeatures, ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, - HeadData, Header, IndexedVec, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, + ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, HeadData, + Header, IndexedVec, MultiDisputeStatementSet, NodeFeatures, ScrapedOnChainVotes, SessionIndex, SessionInfo, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -2226,7 +2226,7 @@ fn resume_dispute_without_local_statement() { test_state }) }) - // Alice should send a DisputeParticiationMessage::Participate on restart since she has no + // Alice should send a DisputeParticipationMessage::Participate on restart since she has no // local statement for the active dispute. .resume(|mut test_state, mut virtual_overseer| { Box::pin(async move { @@ -2390,7 +2390,7 @@ fn resume_dispute_with_local_statement() { test_state }) }) - // Alice should not send a DisputeParticiationMessage::Participate on restart since she has a + // Alice should not send a DisputeParticipationMessage::Participate on restart since she has a // local statement for the active dispute, instead she should try to (re-)send her vote. .resume(|mut test_state, mut virtual_overseer| { let candidate_receipt = make_valid_candidate_receipt(); @@ -2495,7 +2495,7 @@ fn resume_dispute_without_local_statement_or_local_key() { test_state }) }) - // Two should not send a DisputeParticiationMessage::Participate on restart since she is no + // Two should not send a DisputeParticipationMessage::Participate on restart since she is no // validator in that dispute. 
.resume(|mut test_state, mut virtual_overseer| { Box::pin(async move { diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 24da4dc1e316f8469704ab57e4742dbc07197992..4f6090f90e9535a7cee8da2e333d5e66a0dea972 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -10,11 +10,11 @@ description = "Parachains inherent data provider for Polkadot node" workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } -async-trait = "0.1.74" +async-trait = "0.1.79" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index f66a66e859ec0a139e31068f78225597d577d1a9..ab3cef99e54ff6da279ec30728c308ed8bbf3da6 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -10,7 +10,7 @@ description = "The Prospective Parachains subsystem. Tracks and handles prospect workspace = true [dependencies] -futures = "0.3.19" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "3.6.4" thiserror = { workspace = true } diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 2a09e2b5b2cc83a0596c6f0ae153e66f8f6a1e66..ec1a4abb3ece0a3dc8a01e090f9bf3302dde3294 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index cb55ce39bc89f3eeee3d1cb319351f659eace478..d7a5a8113369fbafb693098003a3f7d43413634d 100644 --- a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -52,7 +52,7 @@ pub const MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME: usize = 200; /// `dispute-coordinator`. /// /// This value should be less than `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME`. Increase it in case -/// `provisioner` sends too many `QueryCandidateVotes` messages to `dispite-coordinator`. +/// `provisioner` sends too many `QueryCandidateVotes` messages to `dispute-coordinator`. 
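The two constants around this hunk encode a batching scheme: the provisioner pulls dispute votes from the dispute-coordinator in `VOTES_SELECTION_BATCH_SIZE` chunks (one `QueryCandidateVotes` round-trip per chunk) and stops accumulating once the runtime-forwarding cap is reached. A sketch of that interplay with illustrative numbers, not the real constants or messages:

const MAX_VOTES_FORWARDED_TO_RUNTIME: usize = 200; // hypothetical cap
const BATCH_SIZE: usize = 11;                      // hypothetical batch size, below the cap

fn select_votes(all_votes: &[u32]) -> Vec<u32> {
    let mut selected = Vec::with_capacity(MAX_VOTES_FORWARDED_TO_RUNTIME);
    for batch in all_votes.chunks(BATCH_SIZE) {
        // One QueryCandidateVotes round-trip per batch in the real subsystem.
        for &vote in batch {
            if selected.len() >= MAX_VOTES_FORWARDED_TO_RUNTIME {
                return selected; // cap reached: no further batches are requested
            }
            selected.push(vote);
        }
    }
    selected
}

fn main() {
    let votes: Vec<u32> = (0..1_000).collect();
    assert_eq!(select_votes(&votes).len(), MAX_VOTES_FORWARDED_TO_RUNTIME);
}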
#[cfg(not(test))] const VOTES_SELECTION_BATCH_SIZE: usize = 1_100; #[cfg(test)] diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index c9ed873d3c25ae1a0fbe590a51412839e8105634..5cfcb96dc2bc710c42400fa49fa888195c79aa48 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -42,11 +42,11 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - vstaging::{node_features::FeatureIndex, NodeFeatures}, - BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, CoreState, Hash, - Id as ParaId, OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, + node_features::FeatureIndex, BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, + CoreIndex, CoreState, Hash, Id as ParaId, NodeFeatures, OccupiedCoreAssumption, SessionIndex, + SignedAvailabilityBitfield, ValidatorIndex, }; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap}; mod disputes; mod error; @@ -598,13 +598,11 @@ async fn select_candidate_hashes_from_tracked( candidates: &[CandidateReceipt], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result<Vec<(CandidateHash, Hash)>, Error> { +) -> Result<HashMap<ParaId, Vec<(CandidateHash, Hash)>>, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; let mut selected_candidates = - Vec::with_capacity(candidates.len().min(availability_cores.len())); - let mut selected_parachains = - HashSet::with_capacity(candidates.len().min(availability_cores.len())); + HashMap::with_capacity(candidates.len().min(availability_cores.len())); gum::debug!( target: LOG_TARGET, @@ -638,7 +636,7 @@ async fn select_candidate_hashes_from_tracked( CoreState::Free => continue, }; - if selected_parachains.contains(&scheduled_core.para_id) { + if selected_candidates.contains_key(&scheduled_core.para_id) { // We already picked a candidate for this parachain. Elastic scaling only works with // prospective parachains mode. continue @@ -677,8 +675,10 @@ async fn select_candidate_hashes_from_tracked( "Selected candidate receipt", ); - selected_parachains.insert(candidate.descriptor.para_id); - selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent)); + selected_candidates.insert( + candidate.descriptor.para_id, + vec![(candidate_hash, candidate.descriptor.relay_parent)], + ); } } @@ -695,12 +695,12 @@ async fn request_backable_candidates( bitfields: &[SignedAvailabilityBitfield], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result<Vec<(CandidateHash, Hash)>, Error> { +) -> Result<HashMap<ParaId, Vec<(CandidateHash, Hash)>>, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; // Record how many cores are scheduled for each paraid. Use a BTreeMap because // we'll need to iterate through them. - let mut scheduled_cores: BTreeMap<ParaId, usize> = BTreeMap::new(); + let mut scheduled_cores_per_para: BTreeMap<ParaId, usize> = BTreeMap::new(); // The on-chain ancestors of a para present in availability-cores.
let mut ancestors: HashMap<ParaId, Ancestors> = HashMap::with_capacity(availability_cores.len()); @@ -709,7 +709,7 @@ async fn request_backable_candidates( let core_idx = CoreIndex(core_idx as u32); match core { CoreState::Scheduled(scheduled_core) => { - *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; + *scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1; }, CoreState::Occupied(occupied_core) => { let is_available = bitfields_indicate_availability( @@ -726,14 +726,14 @@ async fn request_backable_candidates( if let Some(ref scheduled_core) = occupied_core.next_up_on_available { // Request a new backable candidate for the newly scheduled para id. - *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; + *scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1; } } else if occupied_core.time_out_at <= block_number { // Timed out before being available. if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { // Candidate's availability timed out, practically same as scheduled. - *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; + *scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1; } } else { // Not timed out and not available. @@ -747,10 +747,10 @@ async fn request_backable_candidates( }; } - let mut selected_candidates: Vec<(CandidateHash, Hash)> = - Vec::with_capacity(availability_cores.len()); + let mut selected_candidates: HashMap<ParaId, Vec<(CandidateHash, Hash)>> = + HashMap::with_capacity(scheduled_cores_per_para.len()); - for (para_id, core_count) in scheduled_cores { + for (para_id, core_count) in scheduled_cores_per_para { let para_ancestors = ancestors.remove(&para_id).unwrap_or_default(); // If elastic scaling MVP is disabled, only allow one candidate per parachain. @@ -777,7 +777,7 @@ async fn request_backable_candidates( continue } - selected_candidates.extend(response.into_iter().take(core_count)); + selected_candidates.insert(para_id, response); } Ok(selected_candidates) @@ -826,33 +826,38 @@ async fn select_candidates( selected_candidates.clone(), tx, )); - let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?; + let candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?; gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, "Got {} backed candidates", candidates.len()); // keep only one candidate with validation code.
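The "keep only one candidate with validation code" step that follows now has to operate over per-para vectors instead of one flat Vec. A sketch of the merge-and-filter semantics implemented below, using a hypothetical `Cand` type: per-para chains are flattened in HashMap iteration order, at most one candidate carrying a code upgrade survives, and once a candidate is dropped the rest of its para's chain is dropped with it (hence `break`).

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct Cand { id: u32, has_code_upgrade: bool }

fn merge_candidates(per_para: HashMap<u32, Vec<Cand>>) -> Vec<Cand> {
    let mut with_validation_code = false;
    let mut merged = Vec::new();
    for (_para, chain) in per_para {
        for candidate in chain {
            if candidate.has_code_upgrade {
                if with_validation_code {
                    break; // second upgrade: drop it and its descendants in this chain
                }
                with_validation_code = true;
            }
            merged.push(candidate);
        }
    }
    merged
}

fn main() {
    let per_para = HashMap::from([
        (1, vec![Cand { id: 10, has_code_upgrade: true }]),
        (2, vec![Cand { id: 20, has_code_upgrade: false }, Cand { id: 21, has_code_upgrade: true }]),
    ]);
    // Whichever para is visited first, exactly one code upgrade gets through.
    let merged = merge_candidates(per_para);
    assert_eq!(merged.iter().filter(|c| c.has_code_upgrade).count(), 1);
}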
let mut with_validation_code = false; - candidates.retain(|c| { - if c.candidate().commitments.new_validation_code.is_some() { - if with_validation_code { - return false + // merge the candidates into a common collection, preserving the order + let mut merged_candidates = Vec::with_capacity(availability_cores.len()); + + for para_candidates in candidates.into_values() { + for candidate in para_candidates { + if candidate.candidate().commitments.new_validation_code.is_some() { + if with_validation_code { + break + } else { + with_validation_code = true; + } } - with_validation_code = true; + merged_candidates.push(candidate); } - - true - }); + } gum::debug!( target: LOG_TARGET, - n_candidates = candidates.len(), + n_candidates = merged_candidates.len(), n_cores = availability_cores.len(), ?relay_parent, "Selected backed candidates", ); - Ok(candidates) + Ok(merged_candidates) } /// Produces a block number 1 higher than that of the relay parent diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index bdb4f85f4009bdfff879cefcb8928899c5b85c81..823b1d86e4617691ecfafc132fd02e9a3721a460 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -258,6 +258,8 @@ mod select_candidates { BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; use rstest::rstest; + use std::ops::Not; + use CoreState::{Free, Scheduled}; const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; @@ -323,9 +325,6 @@ mod select_candidates { // 11: Occupied(next_up_on_available and available, but different successor para_id) // ] fn mock_availability_cores_one_per_para() -> Vec<CoreState> { - use std::ops::Not; - use CoreState::{Free, Scheduled}; - vec![ // 0: Free, Free, @@ -389,9 +388,6 @@ mod select_candidates { // For test purposes with multiple possible cores assigned to a para, we always return this set // of availability cores: fn mock_availability_cores_multiple_per_para() -> Vec<CoreState> { - use std::ops::Not; - use CoreState::{Free, Scheduled}; - vec![ // 0: Free, Free, @@ -562,7 +558,10 @@ mod select_candidates { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; - let mut backed_iter = expected.clone().into_iter(); + let mut backed = expected.clone().into_iter().fold(HashMap::new(), |mut acc, candidate| { + acc.entry(candidate.descriptor().para_id).or_insert(vec![]).push(candidate); + acc + }); expected.sort_by_key(|c| c.candidate().descriptor.para_id); let mut candidates_iter = expected @@ -583,11 +582,30 @@ mod select_candidates { hashes, sender, )) => { - let response: Vec<BackedCandidate> = - backed_iter.by_ref().take(hashes.len()).collect(); - let expected_hashes: Vec<(CandidateHash, Hash)> = response + let mut response: HashMap<ParaId, Vec<BackedCandidate>> = HashMap::new(); + for (para_id, requested_candidates) in hashes.clone() { + response.insert( + para_id, + backed + .get_mut(&para_id) + .unwrap() + .drain(0..requested_candidates.len()) + .collect(), + ); + } + let expected_hashes: HashMap<ParaId, Vec<(CandidateHash, Hash)>> = response + .iter() - .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)) + .map(|(para_id, candidates)| { + ( + *para_id, + candidates + .iter() + .map(|candidate| { + (candidate.hash(), candidate.descriptor().relay_parent) + }) + .collect(), + ) + }) + .collect(); assert_eq!(expected_hashes, hashes); @@ -768,7 +786,7 @@ mod select_candidates { #[rstest] #[case(ProspectiveParachainsMode::Disabled)] #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] - fn
selects_max_one_code_upgrade( + fn selects_max_one_code_upgrade_one_core_per_para( #[case] prospective_parachains_mode: ProspectiveParachainsMode, ) { let mock_cores = mock_availability_cores_one_per_para(); @@ -780,7 +798,12 @@ mod select_candidates { let cores = [1, 4, 7, 8, 10, 12]; let cores_with_code = [1, 4, 8]; - let expected_cores = [1, 7, 10, 12]; + // We can't be sure which one code upgrade the provisioner will pick. We can only assert + // that it only picks one. These are the possible cores for which the provisioner will + // supply candidates. + // There are multiple possibilities depending on which code upgrade it + // chooses. + let possible_expected_cores = [[1, 7, 10, 12], [4, 7, 10, 12], [7, 8, 10, 12]]; let committed_receipts: Vec<_> = (0..=mock_cores.len()) .map(|i| { @@ -820,8 +843,10 @@ mod select_candidates { // Then, some of them get filtered due to new validation code rule. let expected_backed: Vec<_> = cores.iter().map(|&idx| backed_candidates[idx].clone()).collect(); - let expected_backed_filtered: Vec<_> = - expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); + let expected_backed_filtered: Vec> = possible_expected_cores + .iter() + .map(|indices| indices.iter().map(|&idx| candidates[idx].clone()).collect()) + .collect(); let mock_cores_clone = mock_cores.clone(); @@ -850,13 +875,120 @@ mod select_candidates { assert_eq!(result.len(), 4); - result.into_iter().for_each(|c| { - assert!( - expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)), - "Failed to find candidate: {:?}", - c, - ) - }); + assert!(expected_backed_filtered.iter().any(|expected_backed_filtered| { + result.clone().into_iter().all(|c| { + expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)) + }) + })); + }, + ) + } + + #[test] + fn selects_max_one_code_upgrade_multiple_cores_per_para() { + let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + let mock_cores = vec![ + // 0: Scheduled(default), + Scheduled(scheduled_core(1)), + // 1: Scheduled(default), + Scheduled(scheduled_core(2)), + // 2: Scheduled(default), + Scheduled(scheduled_core(2)), + // 3: Scheduled(default), + Scheduled(scheduled_core(2)), + // 4: Scheduled(default), + Scheduled(scheduled_core(3)), + // 5: Scheduled(default), + Scheduled(scheduled_core(3)), + // 6: Scheduled(default), + Scheduled(scheduled_core(3)), + ]; + + let empty_hash = PersistedValidationData::::default().hash(); + let cores_with_code = [0, 2, 4, 5]; + + // We can't be sure which one code upgrade the provisioner will pick. We can only assert + // that it only picks one. + // These are the possible cores for which the provisioner will + // supply candidates. There are multiple possibilities depending on which code upgrade it + // chooses. 
+ let possible_expected_cores = [vec![0, 1], vec![1, 2, 3], vec![4, 1]]; + + let committed_receipts: Vec<_> = (0..mock_cores.len()) + .map(|i| { + let mut descriptor = dummy_candidate_descriptor(dummy_hash()); + descriptor.para_id = mock_cores[i].para_id().unwrap(); + descriptor.persisted_validation_data_hash = empty_hash; + descriptor.pov_hash = Hash::from_low_u64_be(i as u64); + CommittedCandidateReceipt { + descriptor, + commitments: CandidateCommitments { + new_validation_code: if cores_with_code.contains(&i) { + Some(vec![].into()) + } else { + None + }, + ..Default::default() + }, + } + }) + .collect(); + + // Input to select_candidates + let candidates: Vec<_> = committed_receipts.iter().map(|r| r.to_plain()).collect(); + // Build possible outputs from select_candidates + let backed_candidates: Vec<_> = committed_receipts + .iter() + .map(|committed_receipt| { + BackedCandidate::new( + committed_receipt.clone(), + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) + }) + .collect(); + + // First, provisioner will request backable candidates for each scheduled core. + // Then, some of them get filtered due to new validation code rule. + let expected_backed: Vec<_> = + (0..mock_cores.len()).map(|idx| backed_candidates[idx].clone()).collect(); + let expected_backed_filtered: Vec<Vec<CandidateReceipt>> = possible_expected_cores + .iter() + .map(|indices| indices.iter().map(|&idx| candidates[idx].clone()).collect()) + .collect(); + + let mock_cores_clone = mock_cores.clone(); + + test_harness( + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &candidates, + prospective_parachains_mode, + true, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + + assert!(expected_backed_filtered.iter().any(|expected_backed_filtered| { + result.clone().into_iter().all(|c| { + expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)) + }) && (expected_backed_filtered.len() == result.len()) + })); }, ) } diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index f4f954e316c0b758a4cb1e94f80b729257349632..91b12b8680977fba345e0ccfa9b06f1bcb24ef79 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" thiserror = { workspace = true } gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/core/pvf-checker/src/tests.rs b/polkadot/node/core/pvf-checker/src/tests.rs index b0401ecdc3bd2368e34a263842ea566b712884a0..b2365fe53e52c0f2e1c7cb18768c33cdd6801ca9 100644 --- a/polkadot/node/core/pvf-checker/src/tests.rs +++ b/polkadot/node/core/pvf-checker/src/tests.rs @@ -39,8 +39,8 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; type VirtualOverseer = TestSubsystemContextHandle<PvfCheckerMessage>; -fn dummy_validation_code_hash(descriminator: u8) -> ValidationCodeHash { - ValidationCode(vec![descriminator]).hash() +fn dummy_validation_code_hash(discriminator: u8) -> ValidationCodeHash { + ValidationCode(vec![discriminator]).hash() } struct StartsNewSession { @@ -511,7 +511,7 @@ fn reactivating_pvf_leads_to_second_check() { .reply(PreCheckOutcome::Valid); test_state.expect_submit_vote(&mut handle).await.reply_ok(); - // Now activate a descdedant leaf, where the PVF is not present.
+ // Now activate a descendant leaf, where the PVF is not present. test_state .active_leaves_update( &mut handle, diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 9ed64b88ffde857c6abe171a7d1cad7dd66dd509..a0233d6b751769a4f51f3eda0f7ca0ec75fa1cb7 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -14,7 +14,7 @@ always-assert = "0.1" array-bytes = "6.1" blake3 = "1.5" cfg-if = "1.0" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } is_executable = "1.0.1" @@ -50,7 +50,7 @@ hex-literal = "0.4.1" polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } # For benches and integration tests, depend on ourselves with the test-utils # feature. -polkadot-node-core-pvf = { path = ".", features = ["test-utils"] } +polkadot-node-core-pvf = { path = "", features = ["test-utils"] } rococo-runtime = { path = "../../../runtime/rococo" } adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" } diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 56bad9792fa0b6accea6460025c64c18e8e1e5d8..f3eb9d919aae6ea6ded7df58a088d22ebbe66325 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] cfg-if = "1.0" cpu-time = "1.0.0" -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.152" thiserror = { workspace = true } diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 3f5b4d7ca70c14d53770af2dc470e4a2e5e572a4..340dffe07c3fd3e8b049f1a126a348a7ff4e326f 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -85,9 +85,9 @@ impl PvfPrepData { /// Creates a structure for tests. 
#[cfg(feature = "test-utils")] pub fn from_discriminator_and_timeout(num: u32, timeout: Duration) -> Self { - let descriminator_buf = num.to_le_bytes().to_vec(); + let discriminator_buf = num.to_le_bytes().to_vec(); Self::from_code( - descriminator_buf, + discriminator_buf, ExecutorParams::default(), timeout, PrepareJobKind::Compilation, diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 8ec46f4b08f193082f1fcf8f6a08cae2ef261765..59d5a7e20a887ef35282f0a8ae8c0998625d825a 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -917,7 +917,7 @@ async fn sweeper_task(mut sweeper_rx: mpsc::Receiver<PathBuf>) { gum::trace!( target: LOG_TARGET, ?result, - "Sweeped the artifact file {}", + "Swept the artifact file {}", condemned.display(), ); }, diff --git a/polkadot/node/core/pvf/src/priority.rs b/polkadot/node/core/pvf/src/priority.rs index d4bd49eaee84037043a46f6645612627b081c0f4..d1ef9c604b117d10aca5b99cc8ccd1953fa522f5 100644 --- a/polkadot/node/core/pvf/src/priority.rs +++ b/polkadot/node/core/pvf/src/priority.rs @@ -29,7 +29,7 @@ pub enum Priority { } impl Priority { - /// Returns `true` if `self` is `Crticial` + /// Returns `true` if `self` is `Critical` pub fn is_critical(self) -> bool { self == Priority::Critical } diff --git a/polkadot/node/core/pvf/src/worker_interface.rs b/polkadot/node/core/pvf/src/worker_interface.rs index ad9f0294c09400a47f249bdf16855b3c110f155a..93fffc80662266a6a1112f6d5120cb454bca6276 100644 --- a/polkadot/node/core/pvf/src/worker_interface.rs +++ b/polkadot/node/core/pvf/src/worker_interface.rs @@ -130,11 +130,11 @@ where fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { use rand::distributions::Alphanumeric; - const DESCRIMINATOR_LEN: usize = 10; + const DISCRIMINATOR_LEN: usize = 10; - let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN); + let mut buf = Vec::with_capacity(prefix.len() + DISCRIMINATOR_LEN); buf.extend(prefix.as_bytes()); - buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN)); + buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DISCRIMINATOR_LEN)); let s = std::str::from_utf8(&buf) .expect("the string is collected from a valid utf-8 sequence; qed"); diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index cdfbcd8e57855121a7128a0ed616df4f8ecb0cbd..16ef23c69cad9d40d6cd79d2a754d8a347bfac3c 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -543,9 +543,9 @@ async fn all_security_features_work() { // artifacts cache existing.
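The `make_tmppath` hunk above only renames DESCRIMINATOR to DISCRIMINATOR, but the underlying pattern is worth spelling out: derive a unique temporary path from a prefix plus a short random alphanumeric discriminator. A simplified sketch under the same assumptions as the original (rand 0.8 with its `Alphanumeric` distribution); worker-specific details are omitted:

    use rand::{distributions::Alphanumeric, Rng};
    use std::path::{Path, PathBuf};

    const DISCRIMINATOR_LEN: usize = 10;

    // Build `dir/<prefix><DISCRIMINATOR_LEN random alphanumeric chars>`.
    fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf {
        let discriminator: String = rand::thread_rng()
            .sample_iter(&Alphanumeric)
            .take(DISCRIMINATOR_LEN)
            .map(char::from)
            .collect();
        dir.join(format!("{prefix}{discriminator}"))
    }

    fn main() {
        println!("{}", make_tmppath("worker-", Path::new("/tmp")).display());
    }

Because `Alphanumeric` only emits ASCII bytes, the original's `std::str::from_utf8(&buf)` on the collected bytes can never fail, which is what its `qed` expectation relies on.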
#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] #[tokio::test] -async fn nonexistant_cache_dir() { +async fn nonexistent_cache_dir() { let host = TestHost::new_with_config(|cfg| { - cfg.cache_path = cfg.cache_path.join("nonexistant_cache_dir"); + cfg.cache_path = cfg.cache_path.join("nonexistent_cache_dir"); }) .await; diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 2de3a6ee325ad6d08ea3377e8fccee56b7e505fa..91f5c35b27949f9e0918eaad7adbe5f2557576e8 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } schnellru = "0.2.1" @@ -25,8 +25,8 @@ polkadot-node-subsystem-types = { path = "../../subsystem-types" } sp-api = { path = "../../../../substrate/primitives/api" } sp-core = { path = "../../../../substrate/primitives/core" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } -async-trait = "0.1.74" -futures = { version = "0.3.21", features = ["thread-pool"] } +async-trait = "0.1.79" +futures = { version = "0.3.30", features = ["thread-pool"] } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-node-primitives = { path = "../../primitives" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 9674cda983852be245da33e953bce4c8a395b363..acdb256ab36ca7392c0947237df6d7feb449247d 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,14 +20,12 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - async_backing, slashing, - vstaging::{self, ApprovalVotingParams}, - AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; /// For consistency we have the same capacity for all caches. 
We use 128 as we'll only need that @@ -69,7 +67,7 @@ pub(crate) struct RequestResultCache { disabled_validators: LruMap<Hash, Vec<ValidatorIndex>>, para_backing_state: LruMap<(Hash, ParaId), Option<async_backing::BackingState>>, async_backing_params: LruMap<Hash, async_backing::AsyncBackingParams>, - node_features: LruMap<SessionIndex, vstaging::NodeFeatures>, + node_features: LruMap<SessionIndex, NodeFeatures>, approval_voting_params: LruMap<SessionIndex, ApprovalVotingParams>, claim_queue: LruMap<Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>>, } @@ -454,17 +452,14 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } - pub(crate) fn node_features( - &mut self, - session_index: SessionIndex, - ) -> Option<&vstaging::NodeFeatures> { + pub(crate) fn node_features(&mut self, session_index: SessionIndex) -> Option<&NodeFeatures> { self.node_features.get(&session_index).map(|f| &*f) } pub(crate) fn cache_node_features( &mut self, session_index: SessionIndex, - features: vstaging::NodeFeatures, + features: NodeFeatures, ) { self.node_features.insert(session_index, features); } @@ -594,6 +589,6 @@ pub(crate) enum RequestResult { DisabledValidators(Hash, Vec<ValidatorIndex>), ParaBackingState(Hash, ParaId, Option<async_backing::BackingState>), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), - NodeFeatures(SessionIndex, vstaging::NodeFeatures), + NodeFeatures(SessionIndex, NodeFeatures), ClaimQueue(Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>), } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index fefd2d3f86246cdd72a9b16a6608288807fc7d18..73c661c40762effc2efbbfa66f7f46a47bbe2d16 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,14 +20,12 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfiguratio use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - async_backing, slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_api::ApiError; use sp_core::testing::TaskExecutor; @@ -299,12 +297,12 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { #[test] fn requests_authorities() { let (ctx, mut ctx_handle) = make_subsystem_context(TaskExecutor::new()); - let substem_client = Arc::new(MockSubsystemClient::default()); + let subsystem_client = Arc::new(MockSubsystemClient::default()); let relay_parent = [1; 32].into(); let spawner = sp_core::testing::TaskExecutor::new(); let subsystem = - RuntimeApiSubsystem::new(substem_client.clone(), Metrics(None), SpawnGlue(spawner)); + RuntimeApiSubsystem::new(subsystem_client.clone(), Metrics(None), SpawnGlue(spawner)); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async
move { let (tx, rx) = oneshot::channel(); @@ -315,7 +313,7 @@ fn requests_authorities() { }) .await; - assert_eq!(rx.await.unwrap().unwrap(), substem_client.authorities); + assert_eq!(rx.await.unwrap().unwrap(), subsystem_client.authorities); ctx_handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; }; @@ -1042,7 +1040,7 @@ fn requests_submit_pvf_check_statement() { let _ = rx.await.unwrap().unwrap(); assert_eq!( - &*subsystem_client.submitted_pvf_check_statement.lock().expect("poisened mutex"), + &*subsystem_client.submitted_pvf_check_statement.lock().expect("poisoned mutex"), &[(stmt.clone(), sig.clone()), (stmt.clone(), sig.clone())] ); diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index 23ab8f8421084751fa61d48bd3aa2b346e5a75c1..6fa3d41eddb1e93f6e3a2f8c1101cfd7c4eafe75 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -18,6 +18,6 @@ polkadot-node-primitives = { path = "../primitives" } sc-network = { path = "../../../substrate/client/network" } sp-core = { path = "../../../substrate/primitives/core" } thiserror = { workspace = true } -tokio = "1.24.2" +tokio = "1.37" log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", default-features = false } diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs index 4038d41344f2db09908fc50c82ddd787d374f505..4816fccf3b96e741027d0b0897627486e420d80e 100644 --- a/polkadot/node/jaeger/src/spans.rs +++ b/polkadot/node/jaeger/src/spans.rs @@ -70,7 +70,7 @@ //! let root_span = //! jaeger::Span::new(relay_parent, "root_of_aaall_spans"); //! -//! // the prefered way of adding additional delayed information: +//! // the preferred way of adding additional delayed information: //! let span = root_span.child("inner"); //! //! // ... more operations ... 
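The `RequestResultCache` changes a few hunks above move `NodeFeatures` out of `vstaging` but keep the session-keyed LRU shape. A minimal sketch of that caching pattern with schnellru 0.2 (the version pinned in the runtime-api Cargo.toml); `NodeFeatures` is a stand-in alias here, not the primitives type:

    use schnellru::{ByLength, LruMap};

    type SessionIndex = u32;
    type NodeFeatures = Vec<u8>; // stand-in for polkadot_primitives::NodeFeatures

    struct RequestResultCache {
        node_features: LruMap<SessionIndex, NodeFeatures>,
    }

    impl RequestResultCache {
        fn new() -> Self {
            // Same capacity for every cache, as the comment above explains.
            Self { node_features: LruMap::new(ByLength::new(128)) }
        }

        fn node_features(&mut self, session_index: SessionIndex) -> Option<&NodeFeatures> {
            self.node_features.get(&session_index).map(|f| &*f)
        }

        fn cache_node_features(&mut self, session_index: SessionIndex, features: NodeFeatures) {
            self.node_features.insert(session_index, features);
        }
    }

    fn main() {
        let mut cache = RequestResultCache::new();
        cache.cache_node_features(1, vec![0b1]);
        assert!(cache.node_features(1).is_some());
        assert!(cache.node_features(2).is_none());
    }

Keying by `SessionIndex` rather than by block hash fits here because node features only change at session boundaries, so one entry can serve every block of a session.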
diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index b3eb856f08efec851d9f19a07008e0d6853d81ae..2f63c2f0938d72bea717ac5aff0b2faace47808b 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -40,11 +40,11 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-primitives = { path = "../../primitives" } color-eyre = { version = "0.6.1", default-features = false } assert_matches = "1.5" -async-trait = "0.1.74" +async-trait = "0.1.79" sp-keystore = { path = "../../../substrate/primitives/keystore" } sp-core = { path = "../../../substrate/primitives/core" } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } erasure = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } @@ -58,7 +58,7 @@ polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" } [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } [build-dependencies] substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } diff --git a/polkadot/node/malus/README.md b/polkadot/node/malus/README.md index e0c7893a75319ed40b6bde633309f209dac91225..25453a1980e4b8f66ac50d3be89eb798f4b2b0e6 100644 --- a/polkadot/node/malus/README.md +++ b/polkadot/node/malus/README.md @@ -18,7 +18,7 @@ defined in the [(DSL[(**D**omain **S**pecific **L**anguage)]) doc](https://parit ## Usage -> Assumes you already gained permissiones, ping in element `@javier:matrix.parity.io` to get access. +> Assumes you already gained permissions, ping in element `@javier:matrix.parity.io` to get access. > and you have cloned the [zombienet][zombienet] repo. To launch a test case in the development cluster use (e.g. 
for the ./node/malus/integrationtests/0001-dispute-valid-block.toml): diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index c567278f70ea79740510aea4099170179f7e1f85..fbf0abf829e136a139f3de0db3aef216b2e49679 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } @@ -30,7 +30,7 @@ log = { workspace = true, default-features = true } assert_cmd = "2.0.4" tempfile = "3.2.0" hyper = { version = "0.14.20", default-features = false, features = ["http1", "tcp"] } -tokio = "1.24.2" +tokio = "1.37" polkadot-test-service = { path = "../test/service", features = ["runtime-metrics"] } substrate-test-utils = { path = "../../../substrate/test-utils" } sc-service = { path = "../../../substrate/client/service" } diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index 2bc09c5f42acad52a8a96dab3916ed6314f27d56..4c04ad83f84f3c75e69ac8771670816b7ea7b4fa 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -20,7 +20,7 @@ polkadot-node-jaeger = { path = "../../jaeger" } rand = "0.8" itertools = "0.10.5" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } @@ -37,5 +37,5 @@ schnorrkel = { version = "0.11.4", default-features = false } # rand_core should match schnorrkel rand_core = "0.6.2" rand_chacha = "0.3.1" -env_logger = "0.9.0" +env_logger = "0.11" log = { workspace = true, default-features = true } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index f4e40270160cbf54cfeb516c5d7deeed59e7373a..d360a18423e6bdea62ee0609cf5ea67f16e2e68e 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -230,7 +230,7 @@ impl ApprovalEntry { Ok(()) } - // Get the assignment certiticate and claimed candidates. + // Get the assignment certificate and claimed candidates. pub fn assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) { (self.assignment.clone(), self.assignment_claimed_candidates.clone()) } @@ -404,7 +404,7 @@ impl Knowledge { }, }; - // In case of succesful insertion of multiple candidate assignments create additional + // In case of successful insertion of multiple candidate assignments create additional // entries for each assigned candidate. This fakes knowledge of individual assignments, but // we need to share the same `MessageSubject` with the followup approval candidate index. if kind == MessageKind::Assignment && success && message.1.count_ones() > 1 { @@ -1897,10 +1897,10 @@ impl State { _ => break, }; - // Any peer which is in the `known_by` see and we know its peer_id authorithy id + // Any peer which is in the `known_by` set and we know its peer_id authority id // mapping has already been sent all messages it's meant to get for that block and // all in-scope prior blocks.
In case, we just learnt about its peer_id - // authorithy-id mapping we have to retry sending the messages that should be sent + // authority-id mapping we have to retry sending the messages that should be sent // to it for all un-finalized blocks. if entry.known_by.contains_key(&peer_id) && !retry_known_blocks { break @@ -2199,7 +2199,7 @@ impl State { sanitized_assignments } - // Filter out obviously invalid candidate indicies. + // Filter out obviously invalid candidate indices. async fn sanitize_v1_approvals( &mut self, peer_id: PeerId, @@ -2226,7 +2226,7 @@ impl State { sanitized_approvals } - // Filter out obviously invalid candidate indicies. + // Filter out obviously invalid candidate indices. async fn sanitize_v2_approvals( &mut self, peer_id: PeerId, @@ -2260,7 +2260,7 @@ impl State { // The modifier accepts as inputs the current required-routing state, whether // the message is locally originating, and the validator index of the message issuer. // -// Then, if the topology is known, this progates messages to all peers in the required +// Then, if the topology is known, this propagates messages to all peers in the required // routing set which are aware of the block. Peers which are unaware of the block // will have the message sent when it enters their view in `unify_with_peer`. // @@ -2440,7 +2440,7 @@ impl ApprovalDistribution { gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)"); // the relay chain blocks relevant to the approval subsystems // are those that are available, but not finalized yet - // actived and deactivated heads hence are irrelevant to this subsystem, other than + // activated and deactivated heads hence are irrelevant to this subsystem, other than // for tracing purposes. if let Some(activated) = update.activated { let head = activated.hash; diff --git a/polkadot/node/network/approval-distribution/src/metrics.rs b/polkadot/node/network/approval-distribution/src/metrics.rs index 0642b1b2e0cdcea51d4a34263bad87ad6612f035..60c7f2f6d3b895c1f406ed8f3dfbbbe5c499eab1 100644 --- a/polkadot/node/network/approval-distribution/src/metrics.rs +++ b/polkadot/node/network/approval-distribution/src/metrics.rs @@ -299,7 +299,7 @@ impl MetricsTrait for Metrics { prometheus::CounterVec::new( prometheus::Opts::new( "polkadot_parachain_assignments_received_result", - "Result of a processed assignement", + "Result of a processed assignment", ), &["status"] )?, diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 6c88dd53ad364ab0eabed065c9098c1dfcdd81d9..3159fe2ae5e8d4e4df0ba7bf2f4c42b44d658686 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -394,7 +394,7 @@ fn try_import_the_same_assignment() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. 
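The metrics hunk above only corrects the help text, but it shows the labeled-counter shape used throughout these subsystems. A hedged sketch of registering and bumping such a status-labeled counter directly against the prometheus crate (the label values "accepted" and "duplicate" are made up for illustration; the real code registers through the node's metrics helpers rather than a bare `Registry`):

    use prometheus::{CounterVec, Opts, Registry};

    fn main() -> prometheus::Result<()> {
        let registry = Registry::new();
        let assignments = CounterVec::new(
            Opts::new(
                "polkadot_parachain_assignments_received_result",
                "Result of a processed assignment",
            ),
            &["status"], // one time series per distinct status value
        )?;
        registry.register(Box::new(assignments.clone()))?;

        assignments.with_label_values(&["accepted"]).inc();
        assignments.with_label_values(&["duplicate"]).inc();
        Ok(())
    }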
let peers_with_optional_peer_id = peers .iter() @@ -491,7 +491,7 @@ fn try_import_the_same_assignment_v2() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. let peers_with_optional_peer_id = peers .iter() @@ -744,7 +744,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -850,7 +850,7 @@ fn import_approval_happy_path_v1_v2_peers() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -972,7 +972,7 @@ fn import_approval_happy_path_v2() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1083,7 +1083,7 @@ fn multiple_assignments_covered_with_one_approval_vote() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c, d are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1273,7 +1273,7 @@ fn unify_with_peer_multiple_assignments_covered_with_one_approval_vote() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. + // Set up a gossip topology, where a, b, and c, d are topology neighbors to the node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0, 1], &[2, 4], 3), @@ -1631,7 +1631,7 @@ fn update_peer_view() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. 
setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -1758,7 +1758,7 @@ fn update_peer_view() { assert!(state.blocks.get(&hash_c).unwrap().known_by.get(peer).is_none()); } -// Tests that updating the known peer_id for a given authorithy updates the topology +// Tests that updating the known peer_id for a given authority updates the topology // and sends the required messages #[test] fn update_peer_authority_id() { @@ -1770,9 +1770,9 @@ fn update_peer_authority_id() { let neighbour_x_index = 0; let neighbour_y_index = 2; let local_index = 1; - // X neighbour, we simulate that PeerId is not known in the beginining. + // X neighbour, we simulate that PeerId is not known in the beginning. let neighbour_x = peers.get(neighbour_x_index).unwrap().0; - // Y neighbour, we simulate that PeerId is not known in the beginining. + // Y neighbour, we simulate that PeerId is not known in the beginning. let neighbour_y = peers.get(neighbour_y_index).unwrap().0; let _state = test_harness(State::default(), |mut virtual_overseer| async move { @@ -1814,7 +1814,7 @@ fn update_peer_authority_id() { }) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology( @@ -2053,7 +2053,7 @@ fn sends_assignments_even_when_state_is_approved() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -2125,7 +2125,7 @@ fn sends_assignments_even_when_state_is_approved() { } /// Same as `sends_assignments_even_when_state_is_approved_v2` but with `VRFModuloCompact` -/// assignemnts. +/// assignments. #[test] fn sends_assignments_even_when_state_is_approved_v2() { let peers = make_peers_and_authority_ids(8); @@ -2153,7 +2153,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { .iter() .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) .collect_vec(); - // Setup a topology where peer_a is neigboor to current node. + // Setup a topology where peer_a is neighbor to current node. setup_gossip_topology( overseer, make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), @@ -3509,7 +3509,7 @@ fn import_versioned_approval() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V2).await; - // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // Set up a gossip topology, where a, b, c and d are topology neighbors to the node under // testing. 
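The `(Some(peer_id), authority)` pairs built over and over in these tests encode whether a peer's `PeerId` is already known for a given authority; the `update_peer_authority_id` test above starts some neighbours with the mapping unknown. A standalone sketch of the same mapping with itertools 0.10 (the version pinned in the approval-distribution Cargo.toml), using simplified stand-in types:

    use itertools::Itertools;

    type PeerId = u64; // stand-in for the network PeerId
    type AuthorityId = &'static str; // stand-in for AuthorityDiscoveryId

    fn main() {
        let peers: Vec<(PeerId, AuthorityId)> = vec![(10, "alice"), (20, "bob")];

        // Every PeerId known, as in most of these tests:
        let known: Vec<(Option<PeerId>, AuthorityId)> =
            peers.iter().map(|(peer_id, authority)| (Some(*peer_id), *authority)).collect_vec();
        assert_eq!(known[0], (Some(10), "alice"));

        // Simulate a PeerId that is not known in the beginning:
        let partially_known: Vec<(Option<PeerId>, AuthorityId)> = peers
            .iter()
            .enumerate()
            .map(|(i, (peer_id, authority))| (if i == 0 { None } else { Some(*peer_id) }, *authority))
            .collect_vec();
        assert_eq!(partially_known[0], (None, "alice"));
    }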
let peers_with_optional_peer_id = peers .iter() diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 182d92cb163179eea85cafeb1c605763ac18cde1..b5636203f166efdbc2d871df778d148c0f9ce3e4 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = { version = "3.6.1", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } @@ -39,9 +39,9 @@ polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } polkadot-subsystem-bench = { path = "../../subsystem-bench" } -[[test]] +[[bench]] name = "availability-distribution-regression-bench" -path = "tests/availability-distribution-regression-bench.rs" +path = "benches/availability-distribution-regression-bench.rs" harness = false required-features = ["subsystem-benchmarks"] diff --git a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs similarity index 53% rename from polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs rename to polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index bdab11298d5c257741eecb6bada930b929b110c7..c33674a8f2f926ad0a186bc867aabe843f091e32 100644 --- a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -24,46 +24,62 @@ //! 
- availability-store use polkadot_subsystem_bench::{ - availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState}, + availability::{benchmark_availability_write, prepare_test, TestState}, configuration::TestConfiguration, - utils::{warm_up_and_benchmark, WarmUpOptions}, + usage::BenchmarkUsage, + utils::save_to_file, }; +use std::io::Write; + +const BENCH_COUNT: usize = 50; fn main() -> Result<(), String> { let mut messages = vec![]; let mut config = TestConfiguration::default(); - // A single node effort roughly n_cores * needed_approvals / n_validators = 60 * 30 / 300 - config.n_cores = 6; + // A single node effort roughly + config.n_cores = 10; + config.n_validators = 500; config.num_blocks = 3; config.generate_pov_sizes(); + let state = TestState::new(&config); - let usage = warm_up_and_benchmark( - WarmUpOptions::new(&[ - "availability-distribution", - "bitfield-distribution", - "availability-store", - ]), - || { - let mut state = TestState::new(&config); - let (mut env, _protocol_config) = - prepare_test(config.clone(), &mut state, TestDataAvailability::Write, false); + println!("Benchmarking..."); + let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT) + .map(|n| { + print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); + std::io::stdout().flush().unwrap(); + let (mut env, _cfgs) = prepare_test( + &state, + polkadot_subsystem_bench::availability::TestDataAvailability::Write, + false, + ); env.runtime().block_on(benchmark_availability_write( "data_availability_write", &mut env, - state, + &state, )) - }, - )?; - println!("{}", usage); + }) + .collect(); + println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + + let average_usage = BenchmarkUsage::average(&usages); + save_to_file( + "charts/availability-distribution-regression-bench.json", + average_usage.to_chart_json().map_err(|e| e.to_string())?, + ) + .map_err(|e| e.to_string())?; + println!("{}", average_usage); - messages.extend(usage.check_network_usage(&[ - ("Received from peers", 443.333, 0.05), - ("Sent to peers", 21818.555, 0.05), + // We expect no variance for received and sent + // but use 0.001 because we operate with floats + messages.extend(average_usage.check_network_usage(&[ + ("Received from peers", 433.3, 0.001), + ("Sent to peers", 18480.0, 0.001), ])); - messages.extend(usage.check_cpu_usage(&[ - ("availability-distribution", 0.011, 0.05), - ("bitfield-distribution", 0.029, 0.05), - ("availability-store", 0.232, 0.05), + messages.extend(average_usage.check_cpu_usage(&[ + ("availability-distribution", 0.012, 0.05), + ("availability-store", 0.153, 0.05), + ("bitfield-distribution", 0.026, 0.05), ])); if messages.is_empty() { diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs index 4e23030aa499003a92b0e7c3eab858bfa5a55f2d..f99002d4188bf584b927510d86ec0ef3eb272baa 100644 --- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs @@ -147,9 +147,7 @@ mod tests { AllMessages, AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers as test_helpers; - use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateHash, ExecutorParams, Hash, ValidatorIndex, - }; + use polkadot_primitives::{CandidateHash, ExecutorParams, Hash, NodeFeatures, ValidatorIndex}; use test_helpers::mock::make_ferdie_keystore; use super::*; diff --git
a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs index 191ee2acd973b2391773a8ca8e5fdcf7032009e5..f478defcaa96530d695eace0acad11057609d8e9 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -348,7 +348,7 @@ impl RunningTask { async fn do_request( &mut self, validator: &AuthorityDiscoveryId, - nerwork_error_freq: &mut gum::Freq, + network_error_freq: &mut gum::Freq, canceled_freq: &mut gum::Freq, ) -> std::result::Result { gum::trace!( @@ -395,7 +395,7 @@ impl RunningTask { }, Err(RequestError::NetworkError(err)) => { gum::warn_if_frequent!( - freq: nerwork_error_freq, + freq: network_error_freq, max_rate: gum::Times::PerHour(100), target: LOG_TARGET, origin = ?validator, diff --git a/polkadot/node/network/availability-distribution/src/requester/tests.rs b/polkadot/node/network/availability-distribution/src/requester/tests.rs index 2f5d900b037e322e92f0e203fbb8f2a1fcf43492..0dedd4f091acd692c5b319f5669c1356bf335e2b 100644 --- a/polkadot/node/network/availability-distribution/src/requester/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/tests.rs @@ -25,7 +25,7 @@ use polkadot_node_primitives::{BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem_test_helpers::mock::new_leaf; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - vstaging::NodeFeatures, BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, + BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, }; use sp_core::traits::SpawnNamed; diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 66a8d8fcdcf9ac4db556d872c121e25832bf21cb..1d814b4fd0edd9add989dd3191500056d39539c8 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -47,8 +47,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, - Id as ParaId, ScheduledCore, SessionInfo, ValidatorIndex, + CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, + ScheduledCore, SessionInfo, ValidatorIndex, }; use test_helpers::mock::{make_ferdie_keystore, new_leaf}; diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 23c4148fa858f7530405d9bd08f79625a40eb968..dd0e0c432345ed7006fa4a18fbb05bc9efca8538 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -10,13 +10,13 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" -tokio = "1.24.2" +futures = "0.3.30" +tokio = "1.37" schnellru = "0.2.1" rand = "0.8.5" fatality = "0.0.6" thiserror = { workspace = true } -async-trait = "0.1.74" +async-trait = "0.1.79" gum = { package = "tracing-gum", path = "../../gum" } polkadot-erasure-coding = { path = "../../../erasure-coding" } @@ -30,7 +30,7 @@ sc-network = { path = "../../../../substrate/client/network" } [dev-dependencies] assert_matches = 
"1.4.0" -env_logger = "0.9.0" +env_logger = "0.11" futures-timer = "3.0.2" log = { workspace = true, default-features = true } diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index 42b1787e04504968c5c379a5e09b5214b760509e..46a38516898f2eea48602d951e928f94838c592f 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! availability-write regression tests +//! availability-read regression tests //! -//! Availability write benchmark based on Kusama parameters and scale. +//! Availability read benchmark based on Kusama parameters and scale. //! //! Subsystems involved: //! - availability-recovery @@ -27,8 +27,12 @@ use polkadot_subsystem_bench::{ TestDataAvailability, TestState, }, configuration::TestConfiguration, - utils::{warm_up_and_benchmark, WarmUpOptions}, + usage::BenchmarkUsage, + utils::save_to_file, }; +use std::io::Write; + +const BENCH_COUNT: usize = 50; fn main() -> Result<(), String> { let mut messages = vec![]; @@ -38,27 +42,39 @@ fn main() -> Result<(), String> { config.num_blocks = 3; config.generate_pov_sizes(); - let usage = warm_up_and_benchmark(WarmUpOptions::new(&["availability-recovery"]), || { - let mut state = TestState::new(&config); - let (mut env, _protocol_config) = prepare_test( - config.clone(), - &mut state, - TestDataAvailability::Read(options.clone()), - false, - ); - env.runtime().block_on(benchmark_availability_read( - "data_availability_read", - &mut env, - state, - )) - })?; - println!("{}", usage); + let state = TestState::new(&config); + + println!("Benchmarking..."); + let usages: Vec = (0..BENCH_COUNT) + .map(|n| { + print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); + std::io::stdout().flush().unwrap(); + let (mut env, _cfgs) = + prepare_test(&state, TestDataAvailability::Read(options.clone()), false); + env.runtime().block_on(benchmark_availability_read( + "data_availability_read", + &mut env, + &state, + )) + }) + .collect(); + println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + + let average_usage = BenchmarkUsage::average(&usages); + save_to_file( + "charts/availability-recovery-regression-bench.json", + average_usage.to_chart_json().map_err(|e| e.to_string())?, + ) + .map_err(|e| e.to_string())?; + println!("{}", average_usage); - messages.extend(usage.check_network_usage(&[ - ("Received from peers", 307200.000, 0.05), - ("Sent to peers", 1.667, 0.05), + // We expect no variance for received and sent + // but use 0.001 because we operate with floats + messages.extend(average_usage.check_network_usage(&[ + ("Received from peers", 307200.000, 0.001), + ("Sent to peers", 1.667, 0.001), ])); - messages.extend(usage.check_cpu_usage(&[("availability-recovery", 11.500, 0.05)])); + messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 11.500, 0.05)])); if messages.is_empty() { Ok(()) diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index fb8064878f4f6c02236afbede9f8e91b8f2dd594..94b9d9546cdecd4a244bc3de9e10c1db4c3c066d 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ 
b/polkadot/node/network/availability-recovery/src/lib.rs @@ -408,7 +408,7 @@ async fn handle_recover( ) -> error::Result<()> { let candidate_hash = receipt.hash(); - let span = jaeger::Span::new(candidate_hash, "availbility-recovery") + let span = jaeger::Span::new(candidate_hash, "availability-recovery") .with_stage(jaeger::Stage::AvailabilityRecovery); if let Some(result) = diff --git a/polkadot/node/network/availability-recovery/src/metrics.rs b/polkadot/node/network/availability-recovery/src/metrics.rs index d82a8f9ae5faf662e05c7a8dcaf731e17756a636..9f4cddc57e43a93089fbb08ec4a18dfd77ffea8d 100644 --- a/polkadot/node/network/availability-recovery/src/metrics.rs +++ b/polkadot/node/network/availability-recovery/src/metrics.rs @@ -31,7 +31,7 @@ struct MetricsInner { chunk_requests_issued: Counter, /// Total number of bytes recovered /// - /// Gets incremented on each succesful recovery + /// Gets incremented on each successful recovery recovered_bytes_total: Counter, /// A counter for finished chunk requests. /// @@ -232,7 +232,7 @@ impl metrics::Metrics for Metrics { )?, full_recoveries_started: prometheus::register( Counter::new( - "polkadot_parachain_availability_recovery_recovieries_started", + "polkadot_parachain_availability_recovery_recoveries_started", "Total number of started recoveries.", )?, registry, diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 0ddb5f643b89d382855de92fe450a5807e6e21ac..6b5b784b7fd899713c4ccbce2f9dec649a50f933 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] always-assert = "0.1" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } @@ -30,6 +30,6 @@ sp-keystore = { path = "../../../../substrate/primitives/keystore" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } maplit = "1.0.2" log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.4.0" rand_chacha = "0.3.1" diff --git a/polkadot/node/network/bitfield-distribution/src/metrics.rs b/polkadot/node/network/bitfield-distribution/src/metrics.rs index 71d8a01300f25aea09099b882a00ff5df9cba491..bd956bcbe4a3ebf9038073d8bed78ac50da756d4 100644 --- a/polkadot/node/network/bitfield-distribution/src/metrics.rs +++ b/polkadot/node/network/bitfield-distribution/src/metrics.rs @@ -69,14 +69,14 @@ impl MetricsTrait for Metrics { let metrics = MetricsInner { sent_own_availability_bitfields: prometheus::register( prometheus::Counter::new( - "polkadot_parachain_sent_own_availabilty_bitfields_total", + "polkadot_parachain_sent_own_availability_bitfields_total", "Number of own availability bitfields sent to other peers.", )?, registry, )?, received_availability_bitfields: prometheus::register( prometheus::Counter::new( - "polkadot_parachain_received_availabilty_bitfields_total", + "polkadot_parachain_received_availability_bitfields_total", "Number of valid availability bitfields received from other peers.", )?, registry, diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index ba2434ea47d69469e812d6274b17b43c3b6b5ee3..188b51ebcccae89fd6f69410fd054d25eeabbb01 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ 
b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -150,7 +150,7 @@ fn receive_invalid_signature() { let signing_context = SigningContext { session_index: 1, parent_hash: hash_a }; - // another validator not part of the validatorset + // another validator not part of the validator set let keystore: KeystorePtr = Arc::new(MemoryKeystore::new()); let malicious = Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, None) .expect("Malicious key created"); diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index 2e889fc30eb90063f07c05e34e51d863a3d32f59..9c2423e7e5810c3406ab4f40c219dbd8001245bb 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -11,8 +11,8 @@ workspace = true [dependencies] always-assert = "0.1" -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs index ddce99d5c2a8a721164e73ea61ed989b78cdfbdc..0305aaa067ccd5dbeb571ee3c9531417ac32818b 100644 --- a/polkadot/node/network/bridge/src/lib.rs +++ b/polkadot/node/network/bridge/src/lib.rs @@ -102,6 +102,30 @@ struct SharedInner { collation_peers: HashMap<PeerId, PeerData>, } +// Counts the number of peers that are connected using `version` +fn count_peers_by_version(peers: &HashMap<PeerId, PeerData>) -> HashMap<ProtocolVersion, usize> { + let mut by_version_count = HashMap::new(); + for peer in peers.values() { + *(by_version_count.entry(peer.version).or_default()) += 1; + } + by_version_count +} + +// Notes the peer count +fn note_peers_count(metrics: &Metrics, shared: &Shared) { + let guard = shared.0.lock(); + let validation_stats = count_peers_by_version(&guard.validation_peers); + let collation_stats = count_peers_by_version(&guard.collation_peers); + + for (version, count) in validation_stats { + metrics.note_peer_count(PeerSet::Validation, version, count) + } + + for (version, count) in collation_stats { + metrics.note_peer_count(PeerSet::Collation, version, count) + } +} + pub(crate) enum Mode { Syncing(Box<dyn SyncOracle + Send>), Active, diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 11ac73259e3a178f418f4a6aa316e337acb31322..0a4497fc4b5a22639e516598a15557f08227c969 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -262,7 +262,6 @@ async fn handle_validation_message( } metrics.on_peer_connected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); shared.local_view.clone().unwrap_or(View::default()) }; @@ -320,8 +319,6 @@ async fn handle_validation_message( let w = peer_map.remove(&peer).is_some(); metrics.on_peer_disconnected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); - w }; @@ -524,7 +521,6 @@ async fn handle_collation_message( } metrics.on_peer_connected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); shared.local_view.clone().unwrap_or(View::default()) }; @@ -575,7 +571,6 @@ async fn handle_collation_message( let w = peer_map.remove(&peer).is_some(); metrics.on_peer_disconnected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); w }; @@ -832,6 +827,7 @@ where &metrics, &notification_sinks, ); + note_peers_count(&metrics,
&shared); } } }, diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index cfd88df958ce94f65da14eeab91040672cc4bf7a..2c7135742f56890a7ad147e95c545cbebde04c9b 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3" gum = { package = "tracing-gum", path = "../../gum" } @@ -30,7 +30,7 @@ tokio-util = "0.7.1" [dev-dependencies] log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.4.0" sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 9f306f288a16207d8ee9a3be8ab3771625f46d7e..e6aa55235b7a8094d54fff1c6b5d15a9d0c198ab 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -203,20 +203,40 @@ struct PeerData { version: CollationVersion, } +/// A type wrapping a collation and its designated core index. +struct CollationWithCoreIndex(Collation, CoreIndex); + +impl CollationWithCoreIndex { + /// Returns inner collation ref. + pub fn collation(&self) -> &Collation { + &self.0 + } + + /// Returns inner collation mut ref. + pub fn collation_mut(&mut self) -> &mut Collation { + &mut self.0 + } + + /// Returns inner core index. + pub fn core_index(&self) -> &CoreIndex { + &self.1 + } +} + struct PerRelayParent { prospective_parachains_mode: ProspectiveParachainsMode, - /// Validators group responsible for backing candidates built + /// Per core index validators group responsible for backing candidates built /// on top of this relay parent. - validator_group: ValidatorGroup, + validator_group: HashMap<CoreIndex, ValidatorGroup>, /// Distributed collations. - collations: HashMap<CandidateHash, Collation>, + collations: HashMap<CandidateHash, CollationWithCoreIndex>, } impl PerRelayParent { fn new(mode: ProspectiveParachainsMode) -> Self { Self { prospective_parachains_mode: mode, - validator_group: ValidatorGroup::default(), + validator_group: HashMap::default(), collations: HashMap::new(), } } @@ -350,6 +370,7 @@ async fn distribute_collation( pov: PoV, parent_head_data: HeadData, result_sender: Option<oneshot::Sender<CollationSecondedSignal>>, + core_index: CoreIndex, ) -> Result<()> { let candidate_relay_parent = receipt.descriptor.relay_parent; let candidate_hash = receipt.hash(); @@ -422,7 +443,22 @@ async fn distribute_collation( ); } - let our_core = our_cores[0]; + // Double check that the specified `core_index` is among the ones our para has assignments for. + if !our_cores.iter().any(|assigned_core| assigned_core == &core_index) { + gum::warn!( + target: LOG_TARGET, + para_id = %id, + relay_parent = ?candidate_relay_parent, + cores = ?our_cores, + ?core_index, + "Attempting to distribute collation for a core we are not assigned to ", + ); + + return Ok(()) + } + + let our_core = core_index; + // Determine the group on that core. // // When prospective parachains are disabled, candidate relay parent here is @@ -464,10 +500,12 @@ async fn distribute_collation( "Accepted collation, connecting to validators."
); - let validators_at_relay_parent = &mut per_relay_parent.validator_group.validators; - if validators_at_relay_parent.is_empty() { - *validators_at_relay_parent = validators; - } + // Insert validator group for the `core_index` at relay parent. + per_relay_parent.validator_group.entry(core_index).or_insert_with(|| { + let mut group = ValidatorGroup::default(); + group.validators = validators; + group + }); // Update a set of connected validators if necessary. connect_to_validators(ctx, &state.validator_groups_buf).await; @@ -484,7 +522,10 @@ async fn distribute_collation( per_relay_parent.collations.insert( candidate_hash, - Collation { receipt, pov, parent_head_data, status: CollationStatus::Created }, + CollationWithCoreIndex( + Collation { receipt, pov, parent_head_data, status: CollationStatus::Created }, + core_index, + ), ); // If prospective parachains are disabled, a leaf should be known to peer. @@ -690,7 +731,10 @@ async fn advertise_collation( advertisement_timeouts: &mut FuturesUnordered<ResetInterestTimeout>, metrics: &Metrics, ) { - for (candidate_hash, collation) in per_relay_parent.collations.iter_mut() { + for (candidate_hash, collation_and_core) in per_relay_parent.collations.iter_mut() { + let core_index = *collation_and_core.core_index(); + let collation = collation_and_core.collation_mut(); + // Check that peer will be able to request the collation. if let CollationVersion::V1 = protocol_version { if per_relay_parent.prospective_parachains_mode.is_enabled() { @@ -704,11 +748,17 @@ async fn advertise_collation( } } - let should_advertise = - per_relay_parent - .validator_group - .should_advertise_to(candidate_hash, peer_ids, &peer); + let Some(validator_group) = per_relay_parent.validator_group.get_mut(&core_index) else { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + ?core_index, + "Skipping advertising to validator, validator group for core not found", + ); + return + }; + let should_advertise = validator_group.should_advertise_to(candidate_hash, peer_ids, &peer); match should_advertise { ShouldAdvertiseTo::Yes => {}, ShouldAdvertiseTo::NotAuthority | ShouldAdvertiseTo::AlreadyAdvertised => { @@ -756,9 +806,7 @@ async fn advertise_collation( )) .await; - per_relay_parent - .validator_group - .advertised_to_peer(candidate_hash, &peer_ids, peer); + validator_group.advertised_to_peer(candidate_hash, &peer_ids, peer); advertisement_timeouts.push(ResetInterestTimeout::new( *candidate_hash, @@ -790,6 +838,7 @@ async fn process_msg( pov, parent_head_data, result_sender, + core_index, } => { let _span1 = state .span_per_relay_parent @@ -820,6 +869,7 @@ async fn process_msg( pov, parent_head_data, result_sender, + core_index, ) .await?; }, @@ -1053,7 +1103,7 @@ async fn handle_incoming_request( }; let mode = per_relay_parent.prospective_parachains_mode; - let collation = match &req { + let collation_with_core = match &req { VersionedCollationRequest::V1(_) if !mode.is_enabled() => per_relay_parent.collations.values_mut().next(), VersionedCollationRequest::V2(req) => @@ -1070,22 +1120,24 @@ async fn handle_incoming_request( return Ok(()) }, }; - let (receipt, pov, parent_head_data) = if let Some(collation) = collation { - collation.status.advance_to_requested(); - ( - collation.receipt.clone(), - collation.pov.clone(), - collation.parent_head_data.clone(), - ) - } else { - gum::warn!( - target: LOG_TARGET, - relay_parent = %relay_parent, - "received a `RequestCollation` for a relay parent we don't have collation stored.", - ); + let (receipt, pov, parent_head_data) = + if let
Some(collation_with_core) = collation_with_core { + let collation = collation_with_core.collation_mut(); + collation.status.advance_to_requested(); + ( + collation.receipt.clone(), + collation.pov.clone(), + collation.parent_head_data.clone(), + ) + } else { + gum::warn!( + target: LOG_TARGET, + relay_parent = %relay_parent, + "received a `RequestCollation` for a relay parent we don't have collation stored.", + ); - return Ok(()) - }; + return Ok(()) + }; state.metrics.on_collation_sent_requested(); @@ -1340,7 +1392,9 @@ where .remove(removed) .map(|per_relay_parent| per_relay_parent.collations) .unwrap_or_default(); - for collation in collations.into_values() { + for collation_with_core in collations.into_values() { + let collation = collation_with_core.collation(); + let candidate_hash = collation.receipt.hash(); state.collation_result_senders.remove(&candidate_hash); state.validator_groups_buf.remove_candidate(&candidate_hash); @@ -1477,7 +1531,7 @@ async fn run_inner( continue }; - let next_collation = { + let next_collation_with_core = { let per_relay_parent = match state.per_relay_parent.get(&relay_parent) { Some(per_relay_parent) => per_relay_parent, None => continue, @@ -1497,7 +1551,8 @@ async fn run_inner( } }; - if let Some(collation) = next_collation { + if let Some(collation_with_core) = next_collation_with_core { + let collation = collation_with_core.collation(); let receipt = collation.receipt.clone(); let pov = collation.pov.clone(); let parent_head_data = collation.parent_head_data.clone(); diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index 38e6780eb7d201ed4840cedf2d3bf0c5c310cf07..de561e9f77fcb1e2490cb5f5216064094924bfbc 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -45,9 +45,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - vstaging::NodeFeatures, AuthorityDiscoveryId, CollatorPair, ExecutorParams, GroupIndex, - GroupRotationInfo, IndexedVec, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, - ValidatorIndex, + AuthorityDiscoveryId, CollatorPair, ExecutorParams, GroupIndex, GroupRotationInfo, IndexedVec, + NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::TestCandidateBuilder; use test_helpers::mock::new_leaf; @@ -377,6 +376,7 @@ async fn distribute_collation_with_receipt( pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index e419cd5444f5aabd24e1abe76d82caa6358fe179..707053545630a0a11603ccda030200eed7b5cfe2 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -277,6 +277,7 @@ fn distribute_collation_from_implicit_view() { pov: pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; @@ -358,6 +359,7 @@ fn distribute_collation_up_to_limit() { pov: 
pov.clone(), parent_head_data: HeadData(vec![1, 2, 3]), result_sender: None, + core_index: CoreIndex(0), }, ) .await; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs index 1533f2eda5a57d332ea899e1dd81764f31c86dad..fbb3ff4328a51a7e0b726d03750e814840fe1751 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -45,14 +45,22 @@ use futures::FutureExt; use polkadot_node_network_protocol::PeerId; use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, GroupIndex, SessionIndex}; +/// Elastic scaling: how many candidates per relay chain block the collator supports building. +pub const MAX_CHAINED_CANDIDATES_PER_RCB: NonZeroUsize = match NonZeroUsize::new(3) { + Some(cap) => cap, + None => panic!("max candidates per rcb cannot be zero"), +}; + /// The ring buffer stores at most this many unique validator groups. /// /// This value should be chosen in a way that all groups assigned to our para -/// in the view can fit into the buffer. -pub const VALIDATORS_BUFFER_CAPACITY: NonZeroUsize = match NonZeroUsize::new(3) { - Some(cap) => cap, - None => panic!("buffer capacity must be non-zero"), -}; +/// in the view can fit into the buffer multiplied by the amount of candidates we support per relay +/// chain block in the case of elastic scaling. +pub const VALIDATORS_BUFFER_CAPACITY: NonZeroUsize = + match NonZeroUsize::new(3 * MAX_CHAINED_CANDIDATES_PER_RCB.get()) { + Some(cap) => cap, + None => panic!("buffer capacity must be non-zero"), + }; /// Unique identifier of a validators group. #[derive(Debug)] diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index 14d59d04f2bff7b838717edb927401035537ff4d..ff9c302c73146d501114728c4a483e4465d199a9 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } derive_more = "0.99.17" @@ -31,7 +31,7 @@ indexmap = "2.0.0" [dev-dependencies] async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } diff --git a/polkadot/node/network/dispute-distribution/src/sender/mod.rs b/polkadot/node/network/dispute-distribution/src/sender/mod.rs index f4acc72318ad448b99a106b460b84eefabd5c96d..8187f20146c7ae43fe36601c180ebe9af2c2faeb 100644 --- a/polkadot/node/network/dispute-distribution/src/sender/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/mod.rs @@ -76,7 +76,7 @@ pub struct DisputeSender<M> { /// Value is the hash that was used for the query. active_sessions: HashMap<SessionIndex, Hash>, - /// All ongoing dispute sendings this subsystem is aware of. + /// All ongoing dispute sending this subsystem is aware of. /// /// Using an `IndexMap` so items can be iterated in the order of insertion.
disputes: IndexMap<CandidateHash, SendTask<M>>, @@ -105,7 +105,7 @@ struct WaitForActiveDisputesState { #[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)] impl DisputeSender { - /// Create a new `DisputeSender` which can be used to start dispute sendings. + /// Create a new `DisputeSender` which can be used to start dispute sending. pub fn new(tx: NestingSender<M, DisputeSenderMessage>, metrics: Metrics) -> Self { Self { active_heads: Vec::new(), @@ -362,7 +362,7 @@ async fn get_active_session_indices( runtime: &mut RuntimeInfo, active_heads: &Vec<Hash>, ) -> Result<HashMap<SessionIndex, Hash>> { - let mut indeces = HashMap::new(); + let mut indices = HashMap::new(); // Iterate all heads we track as active and fetch the child's session indices. for head in active_heads { let session_index = runtime.get_session_index_for_child(ctx.sender(), *head).await?; @@ -372,9 +372,9 @@ async fn get_active_session_indices( { gum::debug!(target: LOG_TARGET, ?err, ?session_index, "Can't cache SessionInfo"); } - indeces.insert(session_index, *head); + indices.insert(session_index, *head); } - Ok(indeces) + Ok(indices) } /// Retrieve Set of active disputes from the dispute coordinator. diff --git a/polkadot/node/network/dispute-distribution/src/tests/mock.rs b/polkadot/node/network/dispute-distribution/src/tests/mock.rs index e6a49f14c094c9ce98030dda76f1ea2baf88ec87..ccc050233e8408d09e8cbe6261a44e682939d4b9 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mock.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mock.rs @@ -163,7 +163,7 @@ pub fn make_dispute_message( let invalid_vote = make_explicit_signed(MOCK_VALIDATORS[invalid_validator.0 as usize], candidate_hash, false); gum::trace!( - "Passed time for invald vote: {:#?}", + "Passed time for invalid vote: {:#?}", Instant::now().saturating_duration_since(before_request) ); DisputeMessage::from_signed_statements( diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs index 880d1b18032cc67c546d77a4b68ad59af61ee832..5ad790fb01c22e511a2d5ae3d96c0c96f49bfc78 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs @@ -57,8 +57,8 @@ use polkadot_node_subsystem_test_helpers::{ subsystem_test_harness, TestSubsystemContextHandle, }; use polkadot_primitives::{ - vstaging::NodeFeatures, AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, - Hash, SessionIndex, SessionInfo, + AuthorityDiscoveryId, CandidateHash, CandidateReceipt, ExecutorParams, Hash, NodeFeatures, + SessionIndex, SessionInfo, }; use self::mock::{ diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index 8d0edc206d728b16e0448c3339c1719f26d81413..2d6f2f954c667a8a97aff093f1fa734f6afce9aa 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -22,7 +22,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-primitives = { path = "../../../primitives" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" rand = { version = "0.8.5", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } @@ -37,7 +37,7 @@ sp-authority-discovery = { path = "../../../../substrate/primitives/authority-di polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" -async-trait = "0.1.74"
+async-trait = "0.1.79" parking_lot = "0.12.1" lazy_static = "1.4.0" quickcheck = "1.0.3" diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index 9f33cd5d8a31697f5d42030fc043643ef1ee247f..4dfdd1f7208f66ec4a787ffe8bc7f72c41575c9d 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -508,7 +508,7 @@ where ); } let pretty = PrettyAuthorities(unconnected_authorities); - gum::info!( + gum::debug!( target: LOG_TARGET, ?connected_ratio, ?absolute_connected, diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs index 6817c85f98d87c03b3c252783c464efed88dfdb6..cce78df38f308e34e1c663ab2c27850c818e26f8 100644 --- a/polkadot/node/network/gossip-support/src/tests.rs +++ b/polkadot/node/network/gossip-support/src/tests.rs @@ -122,7 +122,7 @@ impl MockAuthorityDiscovery { self.authorities.lock().clone() } - fn add_more_authorties( + fn add_more_authorities( &self, new_known: Vec, ) -> HashMap> { @@ -720,7 +720,7 @@ fn issues_update_authorities_after_session() { assert!(overseer.recv().timeout(TIMEOUT).await.is_none()); // 4. Connect more authorities except one - let newly_added = authority_discovery_mock.add_more_authorties(unknown_at_session); + let newly_added = authority_discovery_mock.add_more_authorities(unknown_at_session); let mut newly_added_iter = newly_added.iter(); let unconnected_at_last_retry = newly_added_iter .next() diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 7efa0b8ca820d651d77a53a95d61e7672c5f9737..81936364897f926cb2eef810bf917c53e5d4e97a 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" hex = "0.4.3" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } @@ -19,8 +19,8 @@ polkadot-node-jaeger = { path = "../../jaeger" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sc-network = { path = "../../../../substrate/client/network" } sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } -strum = { version = "0.24", features = ["derive"] } -futures = "0.3.21" +strum = { version = "0.26.2", features = ["derive"] } +futures = "0.3.30" thiserror = { workspace = true } fatality = "0.0.6" rand = "0.8" diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index 3c4372a27a2c42e20227adbe15d2cc7fcade3d12..a14d24610722bfad2d26df9526d8e6d7411e6f68 100644 --- a/polkadot/node/network/protocol/src/grid_topology.rs +++ b/polkadot/node/network/protocol/src/grid_topology.rs @@ -89,7 +89,7 @@ impl SessionGridTopology { SessionGridTopology { shuffled_indices, canonical_shuffling, peer_ids } } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authorities ids. pub fn update_authority_ids( &mut self, peer_id: PeerId, @@ -313,7 +313,7 @@ impl SessionGridTopologyEntry { self.topology.is_validator(peer) } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authorities ids. 
pub fn update_authority_ids( &mut self, peer_id: PeerId, @@ -345,7 +345,7 @@ impl SessionGridTopologies { self.inner.get(&session).and_then(|val| val.0.as_ref()) } - /// Updates the known peer ids for the passed authorithies ids. + /// Updates the known peer ids for the passed authorities ids. pub fn update_authority_ids( &mut self, peer_id: PeerId, diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 7a0ff9f4fa9a26c2ec1f7d97b03d61424e83b98b..4dd94b5eac4fc430d1b28d5d906d922f0e970870 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -871,7 +871,7 @@ pub mod v2 { } /// v3 network protocol types. -/// Purpose is for chaning ApprovalDistributionMessage to +/// Purpose is for changing ApprovalDistributionMessage to /// include more than one assignment and approval in a message. pub mod v3 { use parity_scale_codec::{Decode, Encode}; diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index cb329607ad6127024af9e6d6bc3c75c7e813c0e3..d0ae5b4a1bf3c5fa6b70fed76551dffbe75fc7f6 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -234,7 +234,7 @@ pub enum ValidationVersion { /// The second version. V2 = 2, /// The third version where changes to ApprovalDistributionMessage had been made. - /// The changes are translatable to V2 format untill assignments v2 and approvals + /// The changes are translatable to V2 format until assignments v2 and approvals /// coalescing is enabled through a runtime upgrade. V3 = 3, } diff --git a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs index 4455448386728e2fdc7188f35841947ca8ddd985..1d7c4a63e0c31e9440c1386909fe198a6909f740 100644 --- a/polkadot/node/network/protocol/src/request_response/incoming/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/incoming/mod.rs @@ -47,7 +47,7 @@ where Req: IsRequest + Decode + Encode, Req::Response: Encode, { - /// Create configuration for `NetworkConfiguration::request_response_porotocols` and a + /// Create configuration for `NetworkConfiguration::request_response_protocols` and a /// corresponding typed receiver. /// /// This registers that config with substrate networking and receives incoming requests via the diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index 2fb62f56d104fbab804dc2d64941f06c281efb0f..87217bf084fb9277a3e2930cb67f6a5a5ac1dcc8 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -287,7 +287,7 @@ impl Protocol { match self { // Hundreds of validators will start requesting their chunks once they see a candidate // awaiting availability on chain. Given that they will see that block at different - // times (due to network delays), 100 seems big enough to accomodate for "bursts", + // times (due to network delays), 100 seems big enough to accommodate "bursts", // assuming we can service requests relatively quickly, which would need to be measured // as well.
Protocol::ChunkFetchingV1 => 100, diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs index ba29b32c4ce036005d691e9e495dc6e42c3f3d81..60eecb69f738912ddb0240c890e2283db7f91a72 100644 --- a/polkadot/node/network/protocol/src/request_response/v1.rs +++ b/polkadot/node/network/protocol/src/request_response/v1.rs @@ -183,7 +183,7 @@ impl IsRequest for AvailableDataFetchingRequest { pub struct StatementFetchingRequest { /// Data needed to locate and identify the needed statement. pub relay_parent: Hash, - /// Hash of candidate that was used create the `CommitedCandidateRecept`. + /// Hash of candidate that was used to create the `CommittedCandidateReceipt`. pub candidate_hash: CandidateHash, } diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index 01f7d818c1c52ffd677379e3a0a9d2f543572d92..d8ae031cbf36dabf2899e61574958ffdbde62921 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/network/statement-distribution/src/error.rs b/polkadot/node/network/statement-distribution/src/error.rs index a712ab6da436f813d956df38c1cd0da9f02de3be..d7f52162fe2370481d7dd76717ea0f977be98a24 100644 --- a/polkadot/node/network/statement-distribution/src/error.rs +++ b/polkadot/node/network/statement-distribution/src/error.rs @@ -81,6 +81,9 @@ pub enum Error { #[error("Fetching validator groups failed {0:?}")] FetchValidatorGroups(RuntimeApiError), + #[error("Fetching claim queue failed {0:?}")] + FetchClaimQueue(runtime::Error), + #[error("Attempted to share statement when not a validator or not assigned")] InvalidShare, diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs index 81e226c4ff8950d8edc9b18d8a3ddef0e0b76d93..8d1683759a0360871b149926b73b8a77ad5bec6a 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs @@ -64,7 +64,7 @@ pub async fn respond( // late, as each requester having the data will help distributing it. // 2. If we take too long, the requests timing out will not yet have had any data sent, thus // we wasted no bandwidth. - // 3. If the queue is full, requestes will get an immediate error instead of running in a + // 3. If the queue is full, requests will get an immediate error instead of running in a // timeout, thus requesters can immediately try another peer and be faster.
// // From this perspective we would not want parallel response sending at all, but we don't diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 2766ec9815af1e87051603c1b45304c4758cf9f9..7d355cc887255a377ce54f786f68b13cd80895c5 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -43,7 +43,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers::mock::{make_ferdie_keystore, new_leaf}; use polkadot_primitives::{ - vstaging::NodeFeatures, ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, + ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, NodeFeatures, SessionInfo, ValidationCode, }; use polkadot_primitives_test_helpers::{ @@ -1494,7 +1494,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( Err(()) => {} ); - // And now the succeding request from peer_b: + // And now the succeeding request from peer_b: let (pending_response, response_rx) = oneshot::channel(); let inner_req = StatementFetchingRequest { relay_parent: metadata.relay_parent, diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs index c09916e56201f20a0fbbbac349681b512f1fd5fe..87cdc389cb35973403bdf33ab4522d32174d7ec1 100644 --- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -430,8 +430,8 @@ impl ClusterTracker { /// /// Normally we should not have pending statements to validators in our cluster, /// but if we do for all validators in our cluster, then we don't participate - /// in backing. Ocasional pending statements are expected if two authorities - /// can't detect each otehr or after restart, where it takes a while to discover + /// in backing. Occasional pending statements are expected if two authorities + /// can't detect each other or after restart, where it takes a while to discover /// the whole network. 
pub fn warn_if_too_many_pending_statements(&self, parent_hash: Hash) { diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 2c9cdba4ea8eb1155f9307eb01460a777ae9a737..f5a8ec4a269639bb25d1b309378a50a36bfb54eb 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -46,6 +46,7 @@ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::ReputationAggregator, runtime::{request_min_backing_votes, ProspectiveParachainsMode}, + vstaging::fetch_claim_queue, }; use polkadot_primitives::{ AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, @@ -112,7 +113,7 @@ const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` S const COST_DISABLED_VALIDATOR: Rep = Rep::CostMinor("Sent a statement from a disabled validator"); const COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE: Rep = - Rep::CostMinor("Unexpected Manifest, missing knowlege for relay parent"); + Rep::CostMinor("Unexpected Manifest, missing knowledge for relay parent"); const COST_UNEXPECTED_MANIFEST_DISALLOWED: Rep = Rep::CostMinor("Unexpected Manifest, Peer Disallowed"); const COST_UNEXPECTED_MANIFEST_PEER_UNKNOWN: Rep = @@ -149,10 +150,9 @@ pub(crate) const REQUEST_RETRY_DELAY: Duration = Duration::from_secs(1); struct PerRelayParentState { local_validator: Option<LocalValidatorState>, statement_store: StatementStore, - availability_cores: Vec<CoreState>, - group_rotation_info: GroupRotationInfo, seconding_limit: usize, session: SessionIndex, + groups_per_para: HashMap<ParaId, Vec<GroupIndex>>, } impl PerRelayParentState { @@ -563,11 +563,13 @@ pub(crate) async fn handle_active_leaves_update( activated: &ActivatedLeaf, leaf_mode: ProspectiveParachainsMode, ) -> JfyiErrorResult<()> { - let seconding_limit = match leaf_mode { + let max_candidate_depth = match leaf_mode { ProspectiveParachainsMode::Disabled => return Ok(()), - ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth + 1, + ProspectiveParachainsMode::Enabled { max_candidate_depth, ..
} => max_candidate_depth, }; + let seconding_limit = max_candidate_depth + 1; + state .implicit_view .activate_leaf(ctx.sender(), activated.hash) .await @@ -628,8 +630,8 @@ pub(crate) async fn handle_active_leaves_update( request_min_backing_votes(new_relay_parent, session_index, ctx.sender()).await?; let mut per_session_state = PerSessionState::new(session_info, &state.keystore, minimum_backing_votes); - if let Some(toplogy) = state.unused_topologies.remove(&session_index) { - per_session_state.supply_topology(&toplogy.topology, toplogy.local_index); + if let Some(topology) = state.unused_topologies.remove(&session_index) { + per_session_state.supply_topology(&topology.topology, topology.local_index); } state.per_session.insert(session_index, per_session_state); } @@ -693,15 +695,23 @@ pub(crate) async fn handle_active_leaves_update( } }); + let groups_per_para = determine_groups_per_para( + ctx.sender(), + new_relay_parent, + availability_cores, + group_rotation_info, + max_candidate_depth, + ) + .await; + state.per_relay_parent.insert( new_relay_parent, PerRelayParentState { local_validator, statement_store: StatementStore::new(&per_session.groups), - availability_cores, - group_rotation_info, seconding_limit, session: session_index, + groups_per_para, }, ); } @@ -2126,17 +2136,64 @@ async fn provide_candidate_to_grid( } } -fn group_for_para( - availability_cores: &[CoreState], - group_rotation_info: &GroupRotationInfo, - para_id: ParaId, -) -> Option<GroupIndex> { - // Note: this won't work well for on-demand parachains as it assumes that core assignments are - // fixed across blocks. - let core_index = availability_cores.iter().position(|c| c.para_id() == Some(para_id)); +// Utility function to populate per relay parent `ParaId` to `GroupIndex` mappings. +async fn determine_groups_per_para( + sender: &mut impl overseer::StatementDistributionSenderTrait, + relay_parent: Hash, + availability_cores: Vec<CoreState>, + group_rotation_info: GroupRotationInfo, + max_candidate_depth: usize, +) -> HashMap<ParaId, Vec<GroupIndex>> { + let maybe_claim_queue = fetch_claim_queue(sender, relay_parent) + .await + .unwrap_or_else(|err| { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + ?err, + "determine_groups_per_para: `claim_queue` API not available, falling back to iterating availability cores" + ); + None + }); + + let n_cores = availability_cores.len(); - core_index - .map(|c| group_rotation_info.group_for_core(CoreIndex(c as _), availability_cores.len())) + // Determine the core indices occupied by each para at the current relay parent. To support + // on-demand parachains we also consider the core indices at next block if core has a candidate + // pending availability. + let para_core_indices: Vec<_> = if let Some(claim_queue) = maybe_claim_queue { + claim_queue + .iter_claims_at_depth(0) + .map(|(core_index, para)| (para, core_index)) + .collect() + } else { + availability_cores + .into_iter() + .enumerate() + .filter_map(|(index, core)| match core { + CoreState::Scheduled(scheduled_core) => + Some((scheduled_core.para_id, CoreIndex(index as u32))), + CoreState::Occupied(occupied_core) => + if max_candidate_depth >= 1 { + occupied_core + .next_up_on_available + .map(|scheduled_core| (scheduled_core.para_id, CoreIndex(index as u32))) + } else { + None + }, + CoreState::Free => None, + }) + .collect() + }; + + let mut groups_per_para = HashMap::new(); + // Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`.
+ for (para, core_index) in para_core_indices { + let group_index = group_rotation_info.group_for_core(core_index, n_cores); + groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index) + } + + groups_per_para } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -2192,18 +2249,23 @@ async fn fragment_tree_update_inner( let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { - let group_index = group_for_para( - &prs.availability_cores, - &prs.group_rotation_info, - receipt.descriptor().para_id, - ); - let per_session = state.per_session.get(&prs.session); - if let (Some(per_session), Some(group_index)) = (per_session, group_index) { + let group_index = confirmed.group_index(); + + // Sanity check if group_index is valid for this para at relay parent. + let Some(expected_groups) = prs.groups_per_para.get(&receipt.descriptor().para_id) + else { + continue + }; + if !expected_groups.iter().any(|g| *g == group_index) { + continue + } + + let per_session = state.per_session.get(&prs.session); + if let Some(per_session) = per_session { send_backing_fresh_statements( ctx, candidate_hash, - group_index, + confirmed.group_index(), &receipt.descriptor().relay_parent, prs, confirmed, @@ -2311,13 +2373,12 @@ async fn handle_incoming_manifest_common<'a, Context>( Some(x) => x, }; - let expected_group = group_for_para( - &relay_parent_state.availability_cores, - &relay_parent_state.group_rotation_info, - para_id, - ); + let Some(expected_groups) = relay_parent_state.groups_per_para.get(&para_id) else { + modify_reputation(reputation, ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; + return None + }; - if expected_group != Some(manifest_summary.claimed_group_index) { + if !expected_groups.iter().any(|g| g == &manifest_summary.claimed_group_index) { modify_reputation(reputation, ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; return None } @@ -3037,13 +3098,11 @@ pub(crate) async fn handle_response( relay_parent_state.session, |v| per_session.session_info.validators.get(v).map(|x| x.clone()), |para, g_index| { - let expected_group = group_for_para( - &relay_parent_state.availability_cores, - &relay_parent_state.group_rotation_info, - para, - ); + let Some(expected_groups) = relay_parent_state.groups_per_para.get(&para) else { + return false + }; - Some(g_index) == expected_group + expected_groups.iter().any(|g| g == &g_index) }, disabled_mask, ); diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index bbcb268415e67141ff1c620a134582d06bd8a67f..fe270c8a58e81fc5ebee9a42c38a1cedd1a0b516 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -320,7 +320,7 @@ impl RequestManager { // need for the current node to limit itself to the same amount the // requests, because the requests are going to different nodes anyways. // While looking at https://github.com/paritytech/polkadot-sdk/issues/3314, - // found out that this requests take around 100ms to fullfill, so it + // found out that these requests take around 100ms to fulfill, so it // would make sense to try to request things as early as we can, given // we would need to request it for each candidate, around 25 right now // on kusama.
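Taken together, the `determine_groups_per_para` hunks above replace a single-group lookup with a per-para list of backing groups, which is what lets several groups back one para under elastic scaling. A minimal sketch of the grouping step, using toy integer types and an identity rotation rather than the real polkadot-sdk `GroupRotationInfo` API:

```rust
use std::collections::HashMap;

// Toy stand-ins for the real polkadot-sdk types; only the shape matters here.
type ParaId = u32;
type CoreIndex = u32;
type GroupIndex = u32;

// Stand-in for `GroupRotationInfo::group_for_core`: the real implementation
// rotates groups across cores per session; here we use the identity mapping.
fn group_for_core(core: CoreIndex, _n_cores: usize) -> GroupIndex {
    core
}

fn main() {
    // `(para, core)` claims, e.g. as produced by the claim queue at depth 0 or
    // by the availability-cores fallback. Under elastic scaling one para
    // (here para 0) may claim several cores at once.
    let para_core_indices: Vec<(ParaId, CoreIndex)> = vec![(0, 0), (0, 1), (2, 2)];
    let n_cores = 3;

    let mut groups_per_para: HashMap<ParaId, Vec<GroupIndex>> = HashMap::new();
    for (para, core_index) in para_core_indices {
        let group_index = group_for_core(core_index, n_cores);
        groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index);
    }

    // Para 0 ends up with two backing groups, para 2 with one.
    assert_eq!(groups_per_para[&0], vec![0, 1]);
    assert_eq!(groups_per_para[&2], vec![2]);
}
```

The claim-queue path and the availability-cores fallback differ only in how the `(para, core)` pairs are produced; the fold into `groups_per_para` is identical.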
diff --git a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs index 022461e55511cbaba8040064408cd5b47a276cfb..a3b2636d2ffcb6c761a3f9c2da89f88fecd9d981 100644 --- a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs +++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs @@ -292,7 +292,7 @@ impl GroupStatements { mod tests { use super::*; - use polkadot_primitives::v6::{Hash, SigningContext, ValidatorPair}; + use polkadot_primitives::v7::{Hash, SigningContext, ValidatorPair}; use sp_application_crypto::Pair as PairT; #[test] diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index a944a9cd6d021364d3b9fa7b349e9a649866cf29..4fb033e08ce3af0724d5a8770bdb0abf942e38a5 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -312,6 +312,66 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { }); } +// Both validators in the test are part of backing groups assigned to same parachain +#[test] +fn elastic_scaling_useful_cluster_statement_from_non_cluster_peer_rejected() { + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: LocalRole::Validator, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = state.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, 3); + + // Peer A is not in our group, but its group is assigned to same para as we are. + let not_our_group = GroupIndex(1); + + let that_group_validators = state.group_validators(not_our_group, false); + let v_non = that_group_validators[0]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_non)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, &test_leaf, &state, true, vec![]).await; + + let statement = state + .sign_statement( + v_non, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == COST_UNEXPECTED_STATEMENT_INVALID_SENDER.into() => { } + ); + + overseer + }); +} + #[test] fn statement_from_non_cluster_originator_unexpected() { let config = TestConfig { diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 38a12cf32e3b37cd111b4404b53015376e749192..9d00a92e742bb6a304004ed74eb59ea8b6c5440c 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -1829,9 +1829,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { }); } -// Grid statements imported to backing once candidate enters hypothetical frontier. 
-#[test] -fn grid_statements_imported_to_backing() { +fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) { let validator_count = 6; let group_size = 3; let config = TestConfig { @@ -1851,9 +1849,12 @@ fn grid_statements_imported_to_backing() { let local_group_index = local_validator.group_index.unwrap(); let other_group = next_group_index(local_group_index, validator_count, group_size); - let other_para = ParaId::from(other_group.0); - let test_leaf = state.make_dummy_leaf(relay_parent); + // Other para is same para for elastic scaling test (groups_for_first_para > 1) + let other_para = ParaId::from((groups_for_first_para == 1) as u32); + + let test_leaf = + state.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, groups_for_first_para); let (candidate, pvd) = make_candidate( relay_parent, @@ -2018,6 +2019,18 @@ fn grid_statements_imported_to_backing() { overseer }); } +// Grid statements imported to backing once candidate enters hypothetical frontier. +#[test] +fn grid_statements_imported_to_backing() { + inner_grid_statements_imported_to_backing(1); +} + +// Grid statements imported to backing once candidate enters hypothetical frontier. +// All statements are for candidates of the same parachain but from different backing groups. +#[test] +fn elastic_scaling_grid_statements_imported_to_backing() { + inner_grid_statements_imported_to_backing(2); +} #[test] fn advertisements_rejected_from_incorrect_peers() { diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 82986a0330ec25d35f3b75a94be67157283bbd9a..e98b11079312b0539a39d82bdd57f4a8716112ee 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -177,20 +177,39 @@ impl TestState { } fn make_dummy_leaf(&self, relay_parent: Hash) -> TestLeaf { + self.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, 1) + } + + fn make_dummy_leaf_with_multiple_cores_per_para( + &self, + relay_parent: Hash, + groups_for_first_para: usize, + ) -> TestLeaf { TestLeaf { number: 1, hash: relay_parent, parent_hash: Hash::repeat_byte(0), session: 1, availability_cores: self.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) + let para_id = if i < groups_for_first_para { + ParaId::from(0u32) + } else { + ParaId::from(i as u32) + }; + + CoreState::Scheduled(ScheduledCore { para_id, collator: None }) }), disabled_validators: Default::default(), para_data: (0..self.session_info.validator_groups.len()) - .map(|i| (ParaId::from(i as u32), PerParaData::new(1, vec![1, 2, 3].into()))) + .map(|i| { + let para_id = if i < groups_for_first_para { + ParaId::from(0u32) + } else { + ParaId::from(i as u32) + }; + + (para_id, PerParaData::new(1, vec![1, 2, 3].into())) + }) .collect(), minimum_backing_votes: 2, } diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index f91ec80d944064f5cba28275766c0fbcc9231133..ef79cfe2f702bb2b32c7901448da5e1b073c0b72 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] client = { package = "sc-client-api", path = "../../../substrate/client/api" } sp-api = { path = "../../../substrate/primitives/api" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" parking_lot = "0.12.1" polkadot-node-network-protocol = { path = 
"../network/protocol" } @@ -23,13 +23,13 @@ polkadot-primitives = { path = "../../primitives" } orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } gum = { package = "tracing-gum", path = "../gum" } sp-core = { path = "../../../substrate/primitives/core" } -async-trait = "0.1.74" +async-trait = "0.1.79" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } [dev-dependencies] metered = { package = "prioritized-metered-channel", version = "0.6.1", default-features = false, features = ["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } femme = "2.2.1" assert_matches = "1.4.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index e16a3fd27ab3d920da5c83b3c4ee02ab4ccba2cb..167b32a15bc4d1e6cdb150a3e1d0133e035fb617 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -871,7 +871,7 @@ where gum::trace!( target: LOG_TARGET, relay_parent = ?hash, - "Leaf got activated, notifying exterinal listeners" + "Leaf got activated, notifying external listeners" ); for listener in listeners { // it's fine if the listener is no longer interested diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 0494274367d953146ad06e93c3ee00cfc9c3b983..55a6bdb74ba73c04bc66847a1b04ff6175608fa7 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -811,7 +811,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage { fn test_candidate_backing_msg() -> CandidateBackingMessage { let (sender, _) = oneshot::channel(); - CandidateBackingMessage::GetBackedCandidates(Vec::new(), sender) + CandidateBackingMessage::GetBackedCandidates(Default::default(), sender) } fn test_chain_api_msg() -> ChainApiMessage { diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index b4541bcc346c8ce4282947c38e0d9a2591631090..a4bbd824e6712e001f1df1cd9d66f835d02822fb 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bounded-vec = "0.7" -futures = "0.3.21" +futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-core = { path = "../../../substrate/primitives/core" } diff --git a/polkadot/node/primitives/src/approval.rs b/polkadot/node/primitives/src/approval.rs index f2a79e025affe3de108f0ce8abd985e53c862a5c..b73cb4c717db7ff856669b528d5e065e413d6101 100644 --- a/polkadot/node/primitives/src/approval.rs +++ b/polkadot/node/primitives/src/approval.rs @@ -382,7 +382,7 @@ pub mod v2 { /// The core index chosen in this cert. core_index: CoreIndex, }, - /// Deprectated assignment. Soon to be removed. + /// Deprecated assignment. Soon to be removed. /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number. 
/// diff --git a/polkadot/node/primitives/src/disputes/mod.rs b/polkadot/node/primitives/src/disputes/mod.rs index 768b95f65537b7ebfe5e4a8baadfd9eec685af4f..5814ecee44f4e9ee333f1898c92341799978d646 100644 --- a/polkadot/node/primitives/src/disputes/mod.rs +++ b/polkadot/node/primitives/src/disputes/mod.rs @@ -84,7 +84,7 @@ impl CandidateVotes { #[derive(Debug, Clone)] /// Valid candidate votes. /// -/// Prefere backing votes over other votes. +/// Prefer backing votes over other votes. pub struct ValidCandidateVotes { votes: BTreeMap<ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)>, } @@ -133,7 +133,7 @@ impl ValidCandidateVotes { self.votes.retain(f) } - /// Get all the validator indeces we have votes for. + /// Get all the validator indices we have votes for. pub fn keys( &self, ) -> Bkeys<'_, ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)> { diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index b6556e0be56bfd03afb8249dd969c74bd42cc2bb..b127d87d4ea4dd7e48b282c2a1c8a7ad525fc5f4 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -31,8 +31,8 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use polkadot_primitives::{ BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, CollatorPair, - CommittedCandidateReceipt, CompactStatement, EncodeAs, Hash, HashT, HeadData, Id as ParaId, - PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, + CommittedCandidateReceipt, CompactStatement, CoreIndex, EncodeAs, Hash, HashT, HeadData, + Id as ParaId, PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorIndex, MAX_CODE_SIZE, MAX_POV_SIZE, }; pub use sp_consensus_babe::{ @@ -74,7 +74,7 @@ pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize; pub const POV_BOMB_LIMIT: usize = (MAX_POV_SIZE * 4u32) as usize; /// How many blocks after finalization information about backed/included candidates should be -/// pre-loaded (when scraoing onchain votes) and kept locally (when pruning). +/// pre-loaded (when scraping onchain votes) and kept locally (when pruning). /// /// We don't want to remove scraped candidates on finalization because we want to /// be sure that disputes will conclude on abandoned forks. @@ -524,6 +524,8 @@ pub struct SubmitCollationParams { /// okay to just drop it. However, if it is called, it should be called with the signed /// statement of a parachain validator seconding the collation. pub result_sender: Option<oneshot::Sender<CollationSecondedSignal>>, + /// The core index on which the resulting candidate should be backed. + pub core_index: CoreIndex, } /// This is the data we keep available for each candidate included in the relay chain.
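`SubmitCollationParams` now names the core the candidate should be backed on, and the collator side (see the `distribute_collation` hunk earlier) refuses cores the para is not assigned to. A small sketch of that selection rule; `CoreIndex` and `select_core` here are simplified stand-ins, not the real API:

```rust
// `CoreIndex` and `select_core` are illustrative stand-ins, not the real API.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct CoreIndex(u32);

// Same rule as the `distribute_collation` hunk: refuse a core the para is not
// assigned to instead of silently falling back to the first assigned core.
fn select_core(assigned: &[CoreIndex], wanted: CoreIndex) -> Option<CoreIndex> {
    assigned.iter().any(|c| *c == wanted).then_some(wanted)
}

fn main() {
    let assigned = [CoreIndex(0), CoreIndex(1)];
    assert_eq!(select_core(&assigned, CoreIndex(1)), Some(CoreIndex(1)));
    // A core outside the para's assignments is rejected, mirroring the
    // warn-and-return path in `distribute_collation`.
    assert_eq!(select_core(&assigned, CoreIndex(5)), None);
}
```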
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index e2bccfa551090da442bfd14e81c5abe27f9090c5..9688ab556473131d6e90075de9b6f69ba4f244e5 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -65,7 +65,6 @@ sp-version = { path = "../../../substrate/primitives/version" } # Substrate Pallets pallet-babe = { path = "../../../substrate/frame/babe" } -pallet-im-online = { path = "../../../substrate/frame/im-online" } pallet-staking = { path = "../../../substrate/frame/staking" } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" } frame-system = { path = "../../../substrate/frame/system" } @@ -78,8 +77,8 @@ frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-c frame-benchmarking = { path = "../../../substrate/frame/benchmarking" } # External Crates -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" hex-literal = "0.4.1" is_executable = "1.0.1" gum = { package = "tracing-gum", path = "../gum" } @@ -142,11 +141,14 @@ polkadot-node-core-pvf-checker = { path = "../core/pvf-checker", optional = true polkadot-node-core-runtime-api = { path = "../core/runtime-api", optional = true } polkadot-statement-distribution = { path = "../network/statement-distribution", optional = true } +xcm = { package = "staging-xcm", path = "../../xcm" } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api" } + [dev-dependencies] polkadot-test-client = { path = "../test/client" } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } -env_logger = "0.9.0" +env_logger = "0.11" assert_matches = "1.5.0" serial_test = "2.0.0" tempfile = "3.2" @@ -194,7 +196,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-babe/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", @@ -210,7 +211,6 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-babe/try-runtime", - "pallet-im-online/try-runtime", "pallet-staking/try-runtime", "pallet-transaction-payment/try-runtime", "polkadot-runtime-parachains/try-runtime", diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 1c44b17b6fd24810bf896a64953a1b2f53b490da..1b6ba99777b03f6c633ecbbdcdfe31dc39a7a7ac 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -123,7 +123,8 @@ fn default_parachains_host_configuration( ) -> polkadot_runtime_parachains::configuration::HostConfiguration<polkadot_primitives::BlockNumber> { use polkadot_primitives::{ - vstaging::node_features::FeatureIndex, AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, + node_features::FeatureIndex, ApprovalVotingParams, AsyncBackingParams, MAX_CODE_SIZE, + MAX_POV_SIZE, }; polkadot_runtime_parachains::configuration::HostConfiguration { @@ -158,7 +159,8 @@ fn default_parachains_host_configuration( allowed_ancestry_len: 2, }, node_features: bitvec::vec::BitVec::from_element( - 1u8 << (FeatureIndex::ElasticScalingMVP as usize), + 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | + 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize), ), scheduler_params: SchedulerParams { lookahead: 2, @@ -166,6 +168,7 @@ fn
default_parachains_host_configuration( paras_availability_period: 4, ..Default::default() }, + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 5 }, ..Default::default() } } diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 085ea93fdc78526f793b726bf385124ec6418e5d..c6cfb7a27d042bdf9e1b23cd41f88b8483a414a5 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -30,6 +30,7 @@ use polkadot_primitives::{ ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; + use sp_core::OpaqueMetadata; use sp_runtime::{ traits::Block as BlockT, @@ -39,7 +40,7 @@ use sp_runtime::{ use sp_version::RuntimeVersion; use sp_weights::Weight; use std::collections::BTreeMap; - +use xcm::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; sp_api::decl_runtime_apis! { /// This runtime API is only implemented for the test runtime! pub trait GetLastTimestamp { @@ -396,4 +397,22 @@ sp_api::impl_runtime_apis! { unimplemented!() } } + + impl xcm_fee_payment_runtime_api::XcmPaymentApi<Block> for Runtime { + fn query_acceptable_payment_assets(_: xcm::Version) -> Result<Vec<VersionedAssetId>, xcm_fee_payment_runtime_api::Error> { + unimplemented!() + } + + fn query_weight_to_asset_fee(_: Weight, _: VersionedAssetId) -> Result<u128, xcm_fee_payment_runtime_api::Error> { + unimplemented!() + } + + fn query_xcm_weight(_: VersionedXcm<()>) -> Result<Weight, xcm_fee_payment_runtime_api::Error> { + unimplemented!() + } + + fn query_delivery_fees(_: VersionedLocation, _: VersionedXcm<()>) -> Result<VersionedAssets, xcm_fee_payment_runtime_api::Error> { + unimplemented!() + } + } } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 4f4ede537055ebf58d0c126890256f9be2dad830..61076477f8e72fdc13d2357e8ff9100bb5836c0f 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -1233,6 +1233,7 @@ pub fn new_full( prometheus_registry: prometheus_registry.clone(), links: beefy_links, on_demand_justifications_handler: beefy_on_demand_justifications_handler, + is_authority: role.is_authority(), }; let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); @@ -1242,18 +1243,18 @@ pub fn new_full( task_manager .spawn_essential_handle() .spawn_blocking("beefy-gadget", None, gadget); - // When offchain indexing is enabled, MMR gadget should also run. - if is_offchain_indexing_enabled { - task_manager.spawn_essential_handle().spawn_blocking( - "mmr-gadget", - None, - MmrGadget::start( - client.clone(), - backend.clone(), - sp_mmr_primitives::INDEXING_PREFIX.to_vec(), - ), - ); - } + } + // When offchain indexing is enabled, MMR gadget should also run. + if is_offchain_indexing_enabled { + task_manager.spawn_essential_handle().spawn_blocking( + "mmr-gadget", + None, + MmrGadget::start( + client.clone(), + backend.clone(), + sp_mmr_primitives::INDEXING_PREFIX.to_vec(), + ), + ); } let config = grandpa::Config { diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index a8167370464937ac91ae44056ea0b17b7184927a..9575b2458a259dfd2716aec6d688a95a73f5530d 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -77,7 +77,7 @@ pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> where Spawner: 'static + SpawnNamed + Clone + Unpin, { - /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. + /// Runtime client generic, providing the `ProvideRuntimeApi` trait besides others.
pub runtime_client: Arc<RuntimeClient>, /// Underlying network service implementation. pub network_service: Arc<sc_network::NetworkService<Block, Hash>>, diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index 2eceb391b15c278825b243fdb8dc21e8ef540390..4d7370859609d5559eb9f3b440bb6b4ee28cddff 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -115,7 +115,7 @@ pub(crate) fn try_upgrade_db_to_next_version( Some(CURRENT_VERSION) => CURRENT_VERSION, // This is an arbitrary future version, we don't handle it. Some(v) => return Err(Error::FutureVersion { current: CURRENT_VERSION, got: v }), - // No version file. For `RocksDB` we dont need to do anything. + // No version file. For `RocksDB` we don't need to do anything. None if db_kind == DatabaseKind::RocksDB => CURRENT_VERSION, // No version file. `ParityDB` did not previously have a version defined. // We handle this as a `0 -> 1` migration. @@ -183,7 +183,7 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result<Version, Error> { // The total lag accounting for disputes. let lag_disputes = initial_leaf_number.saturating_sub(subchain_number); diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 71711ad0fbd07ff84b13bc6e1539dc2c4795ba07..37224d110e8813dbc7b66c2e3dce4a78b617f459 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -35,12 +35,12 @@ color-eyre = { version = "0.6.1", default-features = false } polkadot-overseer = { path = "../overseer" } colored = "2.0.4" assert_matches = "1.5" -async-trait = "0.1.57" +async-trait = "0.1.79" sp-keystore = { path = "../../../substrate/primitives/keystore" } sc-keystore = { path = "../../../substrate/client/keystore" } sp-core = { path = "../../../substrate/primitives/core" } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" bincode = "1.3.3" sha1 = "0.10.6" @@ -48,7 +48,7 @@ hex = "0.4.3" gum = { package = "tracing-gum", path = "../gum" } polkadot-erasure-coding = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } log = { workspace = true, default-features = true } -env_logger = "0.9.0" +env_logger = "0.11" rand = "0.8.5" # `rand` only supports uniform distribution, we need normal distribution for latency. rand_distr = "0.4.3" @@ -56,7 +56,7 @@ bitvec = "1.0.1" kvdb-memorydb = "0.13.0" parity-scale-codec = { version = "3.6.1", features = ["derive", "std"] } -tokio = "1.24.2" +tokio = { version = "1.24.2", features = ["parking_lot", "rt-multi-thread"] } clap-num = "1.0.2" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } sp-keyring = { path = "../../../substrate/primitives/keyring" } @@ -71,6 +71,7 @@ prometheus_endpoint = { package = "substrate-prometheus-endpoint", path = "../..
prometheus = { version = "0.13.0", default-features = false } serde = { workspace = true, default-features = true } serde_yaml = { workspace = true } +serde_json = { workspace = true } polkadot-node-core-approval-voting = { path = "../core/approval-voting" } polkadot-approval-distribution = { path = "../network/approval-distribution" } diff --git a/polkadot/node/subsystem-bench/grafana/availability-read.json b/polkadot/node/subsystem-bench/grafana/availability-read.json index 31c4ad3c795230402ec54d5558c24a3ab9664db4..96a83d2d70fe2e627eda9f142bdf7c67c54fd5a2 100644 --- a/polkadot/node/subsystem-bench/grafana/availability-read.json +++ b/polkadot/node/subsystem-bench/grafana/availability-read.json @@ -119,7 +119,7 @@ "editorMode": "code", "expr": "subsystem_benchmark_n_validators{}", "instant": false, - "legendFormat": "n_vaidators", + "legendFormat": "n_validators", "range": true, "refId": "A" }, @@ -1046,7 +1046,7 @@ "refId": "A" } ], - "title": "Availability subystem metrics", + "title": "Availability subsystem metrics", "type": "row" }, { @@ -1397,7 +1397,7 @@ "refId": "B" } ], - "title": "Recovery throughtput", + "title": "Recovery throughput", "transformations": [], "type": "timeseries" }, diff --git a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index deb351360d74ede1ea78494a34f7c459dc544cdd..10953b6c7839d8ea2a4e0396a4ee37b9875828e9 100644 --- a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -136,31 +136,29 @@ impl BenchCli { let usage = match objective { TestObjective::DataAvailabilityRead(opts) => { - let mut state = availability::TestState::new(&test_config); + let state = availability::TestState::new(&test_config); let (mut env, _protocol_config) = availability::prepare_test( - test_config, - &mut state, + &state, availability::TestDataAvailability::Read(opts), true, ); env.runtime().block_on(availability::benchmark_availability_read( &benchmark_name, &mut env, - state, + &state, )) }, TestObjective::DataAvailabilityWrite => { - let mut state = availability::TestState::new(&test_config); + let state = availability::TestState::new(&test_config); let (mut env, _protocol_config) = availability::prepare_test( - test_config, - &mut state, + &state, availability::TestDataAvailability::Write, true, ); env.runtime().block_on(availability::benchmark_availability_write( &benchmark_name, &mut env, - state, + &state, )) }, TestObjective::ApprovalVoting(ref options) => { diff --git a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index c1b31a509f6dd21c3f23bb44afec8859e51fa76a..619a3617ca4ddd1ce13008be110415ab432e0405 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -41,8 +41,8 @@ use polkadot_node_primitives::approval::{ v2::{CoreBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2}, }; use polkadot_primitives::{ - vstaging::ApprovalVoteMultipleCandidates, CandidateEvent, CandidateHash, CandidateIndex, - CoreIndex, Hash, SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, + ApprovalVoteMultipleCandidates, CandidateEvent, CandidateHash, CandidateIndex, CoreIndex, Hash, + SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, }; use rand::{seq::SliceRandom, RngCore, SeedableRng}; use 
rand_chacha::ChaCha20Rng; @@ -74,8 +74,8 @@ pub struct PeerMessagesGenerator { pub validator_index: ValidatorIndex, /// An array of pre-generated random samplings, that is used to determine, which nodes would /// send a given assignment, to the node under test because of the random samplings. - /// As an optimization we generate this sampling at the begining of the test and just pick - /// one randomly, because always taking the samples would be too expensive for benchamrk. + /// As an optimization we generate this sampling at the beginning of the test and just pick + /// one randomly, because always taking the samples would be too expensive for benchmark. pub random_samplings: Vec>, /// Channel for sending the generated messages to the aggregator pub tx_messages: futures::channel::mpsc::UnboundedSender<(Hash, Vec)>, @@ -234,7 +234,7 @@ impl PeerMessagesGenerator { let all_messages = all_messages .into_iter() .flat_map(|(_, mut messages)| { - // Shuffle the messages inside the same tick, so that we don't priorites messages + // Shuffle the messages inside the same tick, so that we don't prioritize messages // for older nodes. we try to simulate the same behaviour as in real world. messages.shuffle(&mut rand_chacha); messages @@ -560,12 +560,12 @@ struct TestSignInfo { candidate_index: CandidateIndex, /// The validator sending the assignments validator_index: ValidatorIndex, - /// The assignments convering this candidate + /// The assignments covering this candidate assignment: TestMessageInfo, } impl TestSignInfo { - /// Helper function to create a signture for all candidates in `to_sign` parameter. + /// Helper function to create a signature for all candidates in `to_sign` parameter. /// Returns a TestMessage fn sign_candidates( to_sign: &mut Vec, diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 450faf06123f61db9ee91d1d935721d3c128e701..6ab5b86baede30117489b892490dd14b415650e7 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -115,7 +115,7 @@ pub struct ApprovalsOptions { #[clap(short, long, default_value_t = 1.0)] /// Max candidate to be signed in a single approval. pub coalesce_std_dev: f32, - /// The maximum tranche diff between approvals coalesced toghther. + /// The maximum tranche diff between approvals coalesced together. pub coalesce_tranche_diff: u32, #[clap(short, long, default_value_t = false)] /// Enable assignments v2. @@ -170,7 +170,7 @@ struct BlockTestData { total_candidates_before: u64, /// The votes we sent. /// votes[validator_index][candidate_index] tells if validator sent vote for candidate. - /// We use this to mark the test as succesfull if GetApprovalSignatures returns all the votes + /// We use this to mark the test as successful if GetApprovalSignatures returns all the votes /// from here. votes: Arc>>, } @@ -237,7 +237,7 @@ struct GeneratedState { } /// Approval test state used by all mock subsystems to be able to answer messages emitted -/// by the approval-voting and approval-distribution-subystems. +/// by the approval-voting and approval-distribution-subsystems. /// /// This gets cloned across all mock subsystems, so if there is any information that gets /// updated between subsystems, they would have to be wrapped in Arc's.
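The doc comment above explains that this state is cloned into every mock subsystem, so any field mutated after setup must sit behind an `Arc`. A minimal self-contained sketch of that pattern (illustrative only; the field name is borrowed from the test state, the struct is not code from this diff):

use std::sync::{
    atomic::{AtomicU32, Ordering},
    Arc,
};

#[derive(Clone)]
struct SharedApprovalState {
    // Cloning the struct clones the `Arc` handle, not the counter,
    // so every mock subsystem observes the same value.
    last_approved_block: Arc<AtomicU32>,
}

fn main() {
    let state = SharedApprovalState { last_approved_block: Arc::new(AtomicU32::new(0)) };
    let handle_for_mock = state.clone();
    handle_for_mock.last_approved_block.store(5, Ordering::SeqCst);
    // The update is visible through the original handle because the allocation is shared.
    assert_eq!(state.last_approved_block.load(Ordering::SeqCst), 5);
}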
@@ -498,7 +498,7 @@ struct PeerMessageProducer { impl PeerMessageProducer { /// Generates messages by spawning a blocking task in the background which begins creating - /// the assignments/approvals and peer view changes at the begining of each block. + /// the assignments/approvals and peer view changes at the beginning of each block. fn produce_messages( mut self, env: &TestEnvironment, @@ -740,7 +740,7 @@ impl PeerMessageProducer { } } - // Initializes the candidates test data. This is used for bookeeping if more assignments and + // Initializes the candidates test data. This is used for bookkeeping if more assignments and // approvals would be needed. fn initialize_candidates_test_data( &self, @@ -767,7 +767,7 @@ impl PeerMessageProducer { } /// Helper function to build an overseer with the real implementation for `ApprovalDistribution` and -/// `ApprovalVoting` subystems and mock subsytems for all others. +/// `ApprovalVoting` subsystems and mock subsystems for all others. fn build_overseer( state: &ApprovalTestState, network: &NetworkEmulatorHandle, @@ -936,7 +936,7 @@ pub async fn bench_approvals_run( for block_num in 0..env.config().num_blocks { let mut current_slot = tick_to_slot_number(SLOT_DURATION_MILLIS, system_clock.tick_now()); - // Wait untill the time arrieves at the first slot under test. + // Wait until the time arrives at the first slot under test. while current_slot < state.generated_state.initial_slot { sleep(Duration::from_millis(5)).await; current_slot = tick_to_slot_number(SLOT_DURATION_MILLIS, system_clock.tick_now()); @@ -961,7 +961,7 @@ pub async fn bench_approvals_run( } // Wait for all blocks to be approved before exiting. - // This is an invariant of the benchmark, if this does not happen something went teribbly wrong. + // This is an invariant of the benchmark, if this does not happen something went terribly wrong. while state.last_approved_block.load(std::sync::atomic::Ordering::SeqCst) < env.config().num_blocks as u32 { diff --git a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs index 63e383509be9ff6da6e1d5dd61baebc99c574bc4..f55ed99205ede2b8ed254248d9a024bde3d46a11 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs @@ -31,7 +31,7 @@ pub struct TestMessageInfo { pub msg: protocol_v3::ApprovalDistributionMessage, /// The list of peers that would sends this message in a real topology. /// It includes both the peers that would send the message because of the topology - /// or because of randomly chosing so. + /// or because of randomly choosing so. pub sent_by: Vec, /// The tranche at which this message should be sent. pub tranche: u32, @@ -90,7 +90,7 @@ impl MessagesBundle { /// Tells if the bundle is needed for sending. /// We either send it because we need more assignments and approvals to approve the candidates - /// or because we configured the test to send messages untill a given tranche. + /// or because we configured the test to send messages until a given tranche. 
pub fn should_send( &self, candidates_test_data: &HashMap<(Hash, CandidateIndex), CandidateTestData>, @@ -174,24 +174,24 @@ impl TestMessageInfo { } } - /// Returns a list of candidates indicies in this message + /// Returns a list of candidates indices in this message pub fn candidate_indices(&self) -> HashSet { - let mut unique_candidate_indicies = HashSet::new(); + let mut unique_candidate_indices = HashSet::new(); match &self.msg { protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => for (_assignment, candidate_indices) in assignments { for candidate_index in candidate_indices.iter_ones() { - unique_candidate_indicies.insert(candidate_index); + unique_candidate_indices.insert(candidate_index); } }, protocol_v3::ApprovalDistributionMessage::Approvals(approvals) => for approval in approvals { for candidate_index in approval.candidate_indices.iter_ones() { - unique_candidate_indicies.insert(candidate_index); + unique_candidate_indices.insert(candidate_index); } }, } - unique_candidate_indicies + unique_candidate_indices } /// Marks this message as no-shows if the number of configured no-shows is above the registered diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index dc4e1e4031025ca3f868265ac2e8c902f6fc8c23..fe98666906183df9a987a692b4a8be646eab60be 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -15,11 +15,11 @@ // along with Polkadot. If not, see . use crate::{ - configuration::TestConfiguration, + availability::av_store_helpers::new_av_store, dummy_builder, environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, mock::{ - av_store::{self, MockAvailabilityStore}, + av_store::{self, MockAvailabilityStore, NetworkAvailabilityState}, chain_api::{ChainApiState, MockChainApi}, network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, runtime_api::{self, MockRuntimeApi}, @@ -28,12 +28,8 @@ use crate::{ network::new_network, usage::BenchmarkUsage, }; -use av_store::NetworkAvailabilityState; -use av_store_helpers::new_av_store; -use bitvec::bitvec; use colored::Colorize; use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; -use itertools::Itertools; use parity_scale_codec::Encode; use polkadot_availability_bitfield_distribution::BitfieldDistribution; use polkadot_availability_distribution::{ @@ -43,37 +39,27 @@ use polkadot_availability_recovery::AvailabilityRecoverySubsystem; use polkadot_node_core_av_store::AvailabilityStoreSubsystem; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_network_protocol::{ - request_response::{v1::ChunkFetchingRequest, IncomingRequest, ReqProtocolNames}, - OurView, Versioned, VersionedValidationProtocol, + request_response::{IncomingRequest, ReqProtocolNames}, + OurView, }; -use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem::{ messages::{AllMessages, AvailabilityRecoveryMessage}, Overseer, OverseerConnector, SpawnGlue, }; -use polkadot_node_subsystem_test_helpers::{ - derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, -}; use polkadot_node_subsystem_types::{ messages::{AvailabilityStoreMessage, NetworkBridgeEvent}, Span, }; use polkadot_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle}; -use polkadot_primitives::{ - AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, Hash, HeadData, - Header, 
PersistedValidationData, Signed, SigningContext, ValidatorIndex, -}; -use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; -use sc_network::{ - request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}, - PeerId, -}; +use polkadot_primitives::GroupIndex; +use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}; use sc_service::SpawnTaskHandle; use serde::{Deserialize, Serialize}; -use sp_core::H256; -use std::{collections::HashMap, iter::Cycle, ops::Sub, sync::Arc, time::Instant}; +use std::{ops::Sub, sync::Arc, time::Instant}; +pub use test_state::TestState; mod av_store_helpers; +mod test_state; const LOG_TARGET: &str = "subsystem-bench::availability"; @@ -83,7 +69,7 @@ const LOG_TARGET: &str = "subsystem-bench::availability"; pub struct DataAvailabilityReadOptions { #[clap(short, long, default_value_t = false)] /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as - /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have + /// we don't need to re-construct from chunks. Typically this is only faster if nodes have /// enough bandwidth. pub fetch_from_backers: bool, } @@ -149,94 +135,48 @@ fn build_overseer_for_availability_write( (overseer, OverseerHandle::new(raw_handle)) } -/// Takes a test configuration and uses it to create the `TestEnvironment`. pub fn prepare_test( - config: TestConfiguration, - state: &mut TestState, - mode: TestDataAvailability, - with_prometheus_endpoint: bool, -) -> (TestEnvironment, Vec) { - prepare_test_inner( - config, - state, - mode, - TestEnvironmentDependencies::default(), - with_prometheus_endpoint, - ) -} - -fn prepare_test_inner( - config: TestConfiguration, - state: &mut TestState, + state: &TestState, mode: TestDataAvailability, - dependencies: TestEnvironmentDependencies, with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { - // Generate test authorities. - let test_authorities = config.generate_authorities(); - - let mut candidate_hashes: HashMap> = HashMap::new(); - - // Prepare per block candidates. - // Genesis block is always finalized, so we start at 1. - for block_num in 1..=config.num_blocks { - for _ in 0..config.n_cores { - candidate_hashes - .entry(Hash::repeat_byte(block_num as u8)) - .or_default() - .push(state.next_candidate().expect("Cycle iterator")) - } - - // First candidate is our backed candidate. 
- state.backed_candidates.push( - candidate_hashes - .get(&Hash::repeat_byte(block_num as u8)) - .expect("just inserted above") - .first() - .expect("just inserted above") - .clone(), - ); - } - - let runtime_api = runtime_api::MockRuntimeApi::new( - config.clone(), - test_authorities.clone(), - candidate_hashes, - Default::default(), - Default::default(), - 0, - ); - - let availability_state = NetworkAvailabilityState { - candidate_hashes: state.candidate_hashes.clone(), - available_data: state.available_data.clone(), - chunks: state.chunks.clone(), - }; - - let mut req_cfgs = Vec::new(); - let (collation_req_receiver, collation_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - req_cfgs.push(collation_req_cfg); - let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver(&ReqProtocolNames::new(GENESIS_HASH, None)); - req_cfgs.push(pov_req_cfg); + let req_cfgs = vec![collation_req_cfg, pov_req_cfg]; - let (network, network_interface, network_receiver) = - new_network(&config, &dependencies, &test_authorities, vec![Arc::new(availability_state)]); + let dependencies = TestEnvironmentDependencies::default(); + let availability_state = NetworkAvailabilityState { + candidate_hashes: state.candidate_hashes.clone(), + available_data: state.available_data.clone(), + chunks: state.chunks.clone(), + }; + let (network, network_interface, network_receiver) = new_network( + &state.config, + &dependencies, + &state.test_authorities, + vec![Arc::new(availability_state.clone())], + ); let network_bridge_tx = network_bridge::MockNetworkBridgeTx::new( network.clone(), network_interface.subsystem_sender(), - test_authorities.clone(), + state.test_authorities.clone(), ); - let network_bridge_rx = - network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg.clone())); + network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg)); + + let runtime_api = runtime_api::MockRuntimeApi::new( + state.config.clone(), + state.test_authorities.clone(), + state.candidate_receipts.clone(), + Default::default(), + Default::default(), + 0, + ); let (overseer, overseer_handle) = match &mode { TestDataAvailability::Read(options) => { @@ -271,27 +211,12 @@ fn prepare_test_inner( }, TestDataAvailability::Write => { let availability_distribution = AvailabilityDistributionSubsystem::new( - test_authorities.keyring.keystore(), + state.test_authorities.keyring.keystore(), IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, Metrics::try_register(&dependencies.registry).unwrap(), ); - let block_headers = (1..=config.num_blocks) - .map(|block_number| { - ( - Hash::repeat_byte(block_number as u8), - Header { - digest: Default::default(), - number: block_number as BlockNumber, - parent_hash: Default::default(), - extrinsics_root: Default::default(), - state_root: Default::default(), - }, - ) - }) - .collect::>(); - - let chain_api_state = ChainApiState { block_headers }; + let chain_api_state = ChainApiState { block_headers: state.block_headers.clone() }; let chain_api = MockChainApi::new(chain_api_state); let bitfield_distribution = BitfieldDistribution::new(Metrics::try_register(&dependencies.registry).unwrap()); @@ -311,167 +236,42 @@ fn prepare_test_inner( ( TestEnvironment::new( dependencies, - config, + state.config.clone(), network, overseer, overseer_handle, - test_authorities, + 
state.test_authorities.clone(), with_prometheus_endpoint, ), req_cfgs, ) } -#[derive(Clone)] -pub struct TestState { - // Full test configuration - config: TestConfiguration, - // A cycle iterator on all PoV sizes used in the test. - pov_sizes: Cycle>, - // Generated candidate receipts to be used in the test - candidates: Cycle>, - // Map from pov size to candidate index - pov_size_to_candidate: HashMap, - // Map from generated candidate hashes to candidate index in `available_data` - // and `chunks`. - candidate_hashes: HashMap, - // Per candidate index receipts. - candidate_receipt_templates: Vec, - // Per candidate index `AvailableData` - available_data: Vec, - // Per candiadte index chunks - chunks: Vec>, - // Per relay chain block - candidate backed by our backing group - backed_candidates: Vec, -} - -impl TestState { - pub fn next_candidate(&mut self) -> Option { - let candidate = self.candidates.next(); - let candidate_hash = candidate.as_ref().unwrap().hash(); - gum::trace!(target: LOG_TARGET, "Next candidate selected {:?}", candidate_hash); - candidate - } - - /// Generate candidates to be used in the test. - fn generate_candidates(&mut self) { - let count = self.config.n_cores * self.config.num_blocks; - gum::info!(target: LOG_TARGET,"{}", format!("Pre-generating {} candidates.", count).bright_blue()); - - // Generate all candidates - self.candidates = (0..count) - .map(|index| { - let pov_size = self.pov_sizes.next().expect("This is a cycle; qed"); - let candidate_index = *self - .pov_size_to_candidate - .get(&pov_size) - .expect("pov_size always exists; qed"); - let mut candidate_receipt = - self.candidate_receipt_templates[candidate_index].clone(); - - // Make it unique. - candidate_receipt.descriptor.relay_parent = Hash::from_low_u64_be(index as u64); - // Store the new candidate in the state - self.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); - - gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); - - candidate_receipt - }) - .collect::>() - .into_iter() - .cycle(); - } - - pub fn new(config: &TestConfiguration) -> Self { - let config = config.clone(); - - let mut chunks = Vec::new(); - let mut available_data = Vec::new(); - let mut candidate_receipt_templates = Vec::new(); - let mut pov_size_to_candidate = HashMap::new(); - - // we use it for all candidates. - let persisted_validation_data = PersistedValidationData { - parent_head: HeadData(vec![7, 8, 9]), - relay_parent_number: Default::default(), - max_pov_size: 1024, - relay_parent_storage_root: Default::default(), - }; - - // For each unique pov we create a candidate receipt. 
- for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { - gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); - - let mut candidate_receipt = dummy_candidate_receipt(dummy_hash()); - let pov = PoV { block_data: BlockData(vec![index as u8; pov_size]) }; - - let new_available_data = AvailableData { - validation_data: persisted_validation_data.clone(), - pov: Arc::new(pov), - }; - - let (new_chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( - config.n_validators, - &new_available_data, - |_, _| {}, - ); - - candidate_receipt.descriptor.erasure_root = erasure_root; - - chunks.push(new_chunks); - available_data.push(new_available_data); - pov_size_to_candidate.insert(pov_size, index); - candidate_receipt_templates.push(candidate_receipt); - } - - gum::info!(target: LOG_TARGET, "{}","Created test environment.".bright_blue()); - - let mut _self = Self { - available_data, - candidate_receipt_templates, - chunks, - pov_size_to_candidate, - pov_sizes: Vec::from(config.pov_sizes()).into_iter().cycle(), - candidate_hashes: HashMap::new(), - candidates: Vec::new().into_iter().cycle(), - backed_candidates: Vec::new(), - config, - }; - - _self.generate_candidates(); - _self - } - - pub fn backed_candidates(&mut self) -> &mut Vec { - &mut self.backed_candidates - } -} - pub async fn benchmark_availability_read( benchmark_name: &str, env: &mut TestEnvironment, - mut state: TestState, + state: &TestState, ) -> BenchmarkUsage { let config = env.config().clone(); - env.import_block(new_block_import_info(Hash::repeat_byte(1), 1)).await; - - let test_start = Instant::now(); - let mut batch = FuturesUnordered::new(); - let mut availability_bytes = 0u128; - env.metrics().set_n_validators(config.n_validators); env.metrics().set_n_cores(config.n_cores); - for block_num in 1..=env.config().num_blocks { + let mut batch = FuturesUnordered::new(); + let mut availability_bytes = 0u128; + let mut candidates = state.candidates.clone(); + let test_start = Instant::now(); + for block_info in state.block_infos.iter() { + let block_num = block_info.number as usize; gum::info!(target: LOG_TARGET, "Current block {}/{}", block_num, env.config().num_blocks); env.metrics().set_current_block(block_num); let block_start_ts = Instant::now(); + env.import_block(block_info.clone()).await; + for candidate_num in 0..config.n_cores as u64 { let candidate = - state.next_candidate().expect("We always send up to n_cores*num_blocks; qed"); + candidates.next().expect("We always send up to n_cores*num_blocks; qed"); let (tx, rx) = oneshot::channel(); batch.push(rx); @@ -519,7 +319,7 @@ pub async fn benchmark_availability_read( pub async fn benchmark_availability_write( benchmark_name: &str, env: &mut TestEnvironment, - mut state: TestState, + state: &TestState, ) -> BenchmarkUsage { let config = env.config().clone(); @@ -527,7 +327,7 @@ pub async fn benchmark_availability_write( env.metrics().set_n_cores(config.n_cores); gum::info!(target: LOG_TARGET, "Seeding availability store with candidates ..."); - for backed_candidate in state.backed_candidates().clone() { + for backed_candidate in state.backed_candidates.clone() { let candidate_index = *state.candidate_hashes.get(&backed_candidate.hash()).unwrap(); let available_data = state.available_data[candidate_index].clone(); let (tx, rx) = oneshot::channel(); @@ -550,15 +350,14 @@ pub async fn benchmark_availability_write( gum::info!(target: LOG_TARGET, "Done"); let test_start = Instant::now(); - - for 
block_num in 1..=env.config().num_blocks { + for block_info in state.block_infos.iter() { + let block_num = block_info.number as usize; gum::info!(target: LOG_TARGET, "Current block #{}", block_num); env.metrics().set_current_block(block_num); let block_start_ts = Instant::now(); - let relay_block_hash = Hash::repeat_byte(block_num as u8); - env.import_block(new_block_import_info(relay_block_hash, block_num as BlockNumber)) - .await; + let relay_block_hash = block_info.hash; + env.import_block(block_info.clone()).await; // Inform bitfield distribution about our view of current test block let message = polkadot_node_subsystem_types::messages::BitfieldDistributionMessage::NetworkBridgeUpdate( @@ -569,20 +368,13 @@ pub async fn benchmark_availability_write( let chunk_fetch_start_ts = Instant::now(); // Request chunks of our own backed candidate from all other validators. - let mut receivers = Vec::new(); - for index in 1..config.n_validators { + let payloads = state.chunk_fetching_requests.get(block_num - 1).expect("pregenerated"); + let receivers = (1..config.n_validators).filter_map(|index| { let (pending_response, pending_response_receiver) = oneshot::channel(); - let request = RawIncomingRequest { - peer: PeerId::random(), - payload: ChunkFetchingRequest { - candidate_hash: state.backed_candidates()[block_num - 1].hash(), - index: ValidatorIndex(index as u32), - } - .encode(), - pending_response, - }; - + let peer_id = *env.authorities().peer_ids.get(index).expect("all validators have ids"); + let payload = payloads.get(index).expect("pregenerated").clone(); + let request = RawIncomingRequest { peer: peer_id, payload, pending_response }; let peer = env .authorities() .validator_authority_id @@ -592,59 +384,39 @@ pub async fn benchmark_availability_write( if env.network().is_peer_connected(peer) && env.network().send_request_from_peer(peer, request).is_ok() { - receivers.push(pending_response_receiver); + Some(pending_response_receiver) + } else { + None } - } + }); gum::info!(target: LOG_TARGET, "Waiting for all emulated peers to receive their chunk from us ..."); - for receiver in receivers.into_iter() { - let response = receiver.await.expect("Chunk is always served successfully"); - // TODO: check if chunk is the one the peer expects to receive. - assert!(response.result.is_ok()); - } - let chunk_fetch_duration = Instant::now().sub(chunk_fetch_start_ts).as_millis(); + let responses = futures::future::try_join_all(receivers) + .await + .expect("Chunk is always served successfully"); + // TODO: check if chunk is the one the peer expects to receive. + assert!(responses.iter().all(|v| v.result.is_ok())); + let chunk_fetch_duration = Instant::now().sub(chunk_fetch_start_ts).as_millis(); gum::info!(target: LOG_TARGET, "All chunks received in {}ms", chunk_fetch_duration); - let signing_context = SigningContext { session_index: 0, parent_hash: relay_block_hash }; let network = env.network().clone(); let authorities = env.authorities().clone(); - let n_validators = config.n_validators; - // Spawn a task that will generate `n_validator` - 1 signed bitfiends and + // Spawn a task that will generate `n_validator` - 1 signed bitfields and // send them from the emulated peers to the subsystem. // TODO: Implement topology. - env.spawn_blocking("send-bitfields", async move { - for index in 1..n_validators { - let validator_public = - authorities.validator_public.get(index).expect("All validator keys are known"); - - // Node has all the chunks in the world. 
- let payload: AvailabilityBitfield = - AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); - // TODO(soon): Use pre-signed messages. This is quite intensive on the CPU. - let signed_bitfield = Signed::::sign( - &authorities.keyring.keystore(), - payload, - &signing_context, - ValidatorIndex(index as u32), - validator_public, - ) - .ok() - .flatten() - .expect("should be signed"); - - let from_peer = &authorities.validator_authority_id[index]; - - let message = peer_bitfield_message_v2(relay_block_hash, signed_bitfield); + let messages = state.signed_bitfields.get(&relay_block_hash).expect("pregenerated").clone(); + for index in 1..config.n_validators { + let from_peer = &authorities.validator_authority_id[index]; + let message = messages.get(index).expect("pregenerated").clone(); - // Send the action from peer only if it is connected to our node. - if network.is_peer_connected(from_peer) { - let _ = network.send_message_from_peer(from_peer, message); - } + // Send the action from peer only if it is connected to our node. + if network.is_peer_connected(from_peer) { + let _ = network.send_message_from_peer(from_peer, message); } - }); + } gum::info!( "Waiting for {} bitfields to be received and processed", @@ -653,7 +425,7 @@ pub async fn benchmark_availability_write( // Wait for all bitfields to be processed. env.wait_until_metric( - "polkadot_parachain_received_availabilty_bitfields_total", + "polkadot_parachain_received_availability_bitfields_total", None, |value| value == (config.connected_count() * block_num) as f64, ) @@ -679,17 +451,3 @@ pub async fn benchmark_availability_write( &["availability-distribution", "bitfield-distribution", "availability-store"], ) } - -pub fn peer_bitfield_message_v2( - relay_hash: H256, - signed_bitfield: Signed, -) -> VersionedValidationProtocol { - let bitfield = polkadot_node_network_protocol::v2::BitfieldDistributionMessage::Bitfield( - relay_hash, - signed_bitfield.into(), - ); - - Versioned::V2(polkadot_node_network_protocol::v2::ValidationProtocol::BitfieldDistribution( - bitfield, - )) -} diff --git a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs new file mode 100644 index 0000000000000000000000000000000000000000..c328ffedf916e1ae9ce7fcd05e25750e01dc7506 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs @@ -0,0 +1,268 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use crate::configuration::{TestAuthorities, TestConfiguration}; +use bitvec::bitvec; +use colored::Colorize; +use itertools::Itertools; +use parity_scale_codec::Encode; +use polkadot_node_network_protocol::{ + request_response::v1::ChunkFetchingRequest, Versioned, VersionedValidationProtocol, +}; +use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV}; +use polkadot_node_subsystem_test_helpers::{ + derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, +}; +use polkadot_overseer::BlockInfo; +use polkadot_primitives::{ + AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, Hash, HeadData, Header, + PersistedValidationData, Signed, SigningContext, ValidatorIndex, +}; +use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; +use sp_core::H256; +use std::{collections::HashMap, iter::Cycle, sync::Arc}; + +const LOG_TARGET: &str = "subsystem-bench::availability::test_state"; + +#[derive(Clone)] +pub struct TestState { + // Full test configuration + pub config: TestConfiguration, + // A cycle iterator on all PoV sizes used in the test. + pub pov_sizes: Cycle>, + // Generated candidate receipts to be used in the test + pub candidates: Cycle>, + // Map from pov size to candidate index + pub pov_size_to_candidate: HashMap, + // Map from generated candidate hashes to candidate index in `available_data` and `chunks`. + pub candidate_hashes: HashMap, + // Per candidate index receipts. + pub candidate_receipt_templates: Vec, + // Per candidate index `AvailableData` + pub available_data: Vec, + // Per candidate index chunks + pub chunks: Vec>, + // Per relay chain block - candidate backed by our backing group + pub backed_candidates: Vec, + // Relay chain block infos + pub block_infos: Vec, + // Chunk fetching requests for backed candidates + pub chunk_fetching_requests: Vec>>, + // Pregenerated signed availability bitfields + pub signed_bitfields: HashMap>, + // Relay chain block headers + pub block_headers: HashMap, + // Authority keys for the network emulation. + pub test_authorities: TestAuthorities, + // Map from relay chain block hash to the candidate receipts generated for it + pub candidate_receipts: HashMap>, +} + +impl TestState { + pub fn new(config: &TestConfiguration) -> Self { + let mut test_state = Self { + available_data: Default::default(), + candidate_receipt_templates: Default::default(), + chunks: Default::default(), + pov_size_to_candidate: Default::default(), + pov_sizes: Vec::from(config.pov_sizes()).into_iter().cycle(), + candidate_hashes: HashMap::new(), + candidates: Vec::new().into_iter().cycle(), + backed_candidates: Vec::new(), + config: config.clone(), + block_infos: Default::default(), + chunk_fetching_requests: Default::default(), + signed_bitfields: Default::default(), + candidate_receipts: Default::default(), + block_headers: Default::default(), + test_authorities: config.generate_authorities(), + }; + + // We use it for all candidates. + let persisted_validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: Default::default(), + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + + // For each unique pov we create a candidate receipt.
+ for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { + gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); + + let mut candidate_receipt = dummy_candidate_receipt(dummy_hash()); + let pov = PoV { block_data: BlockData(vec![index as u8; pov_size]) }; + + let new_available_data = AvailableData { + validation_data: persisted_validation_data.clone(), + pov: Arc::new(pov), + }; + + let (new_chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + config.n_validators, + &new_available_data, + |_, _| {}, + ); + + candidate_receipt.descriptor.erasure_root = erasure_root; + + test_state.chunks.push(new_chunks); + test_state.available_data.push(new_available_data); + test_state.pov_size_to_candidate.insert(pov_size, index); + test_state.candidate_receipt_templates.push(candidate_receipt); + } + + test_state.block_infos = (1..=config.num_blocks) + .map(|block_num| { + let relay_block_hash = Hash::repeat_byte(block_num as u8); + new_block_import_info(relay_block_hash, block_num as BlockNumber) + }) + .collect(); + + test_state.block_headers = test_state + .block_infos + .iter() + .map(|info| { + ( + info.hash, + Header { + digest: Default::default(), + number: info.number, + parent_hash: info.parent_hash, + extrinsics_root: Default::default(), + state_root: Default::default(), + }, + ) + }) + .collect::>(); + + // Generate all candidates + let candidates_count = config.n_cores * config.num_blocks; + gum::info!(target: LOG_TARGET,"{}", format!("Pre-generating {} candidates.", candidates_count).bright_blue()); + test_state.candidates = (0..candidates_count) + .map(|index| { + let pov_size = test_state.pov_sizes.next().expect("This is a cycle; qed"); + let candidate_index = *test_state + .pov_size_to_candidate + .get(&pov_size) + .expect("pov_size always exists; qed"); + let mut candidate_receipt = + test_state.candidate_receipt_templates[candidate_index].clone(); + + // Make it unique. + candidate_receipt.descriptor.relay_parent = Hash::from_low_u64_be(index as u64); + // Store the new candidate in the state + test_state.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); + + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); + + candidate_receipt + }) + .collect::>() + .into_iter() + .cycle(); + + // Prepare per block candidates. + // Genesis block is always finalized, so we start at 1. + for info in test_state.block_infos.iter() { + for _ in 0..config.n_cores { + let receipt = test_state.candidates.next().expect("Cycle iterator"); + test_state.candidate_receipts.entry(info.hash).or_default().push(receipt); + } + + // First candidate is our backed candidate. 
+ test_state.backed_candidates.push( + test_state + .candidate_receipts + .get(&info.hash) + .expect("just inserted above") + .first() + .expect("just inserted above") + .clone(), + ); + } + + test_state.chunk_fetching_requests = test_state + .backed_candidates + .iter() + .map(|candidate| { + (0..config.n_validators) + .map(|index| { + ChunkFetchingRequest { + candidate_hash: candidate.hash(), + index: ValidatorIndex(index as u32), + } + .encode() + }) + .collect::>() + }) + .collect::>(); + + test_state.signed_bitfields = test_state + .block_infos + .iter() + .map(|block_info| { + let signing_context = + SigningContext { session_index: 0, parent_hash: block_info.hash }; + let messages = (0..config.n_validators) + .map(|index| { + let validator_public = test_state + .test_authorities + .validator_public + .get(index) + .expect("All validator keys are known"); + + // Node has all the chunks in the world. + let payload: AvailabilityBitfield = + AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); + let signed_bitfield = Signed::::sign( + &test_state.test_authorities.keyring.keystore(), + payload, + &signing_context, + ValidatorIndex(index as u32), + validator_public, + ) + .ok() + .flatten() + .expect("should be signed"); + + peer_bitfield_message_v2(block_info.hash, signed_bitfield) + }) + .collect::>(); + + (block_info.hash, messages) + }) + .collect(); + + gum::info!(target: LOG_TARGET, "{}","Created test environment.".bright_blue()); + + test_state + } +} + +fn peer_bitfield_message_v2( + relay_hash: H256, + signed_bitfield: Signed, +) -> VersionedValidationProtocol { + let bitfield = polkadot_node_network_protocol::v2::BitfieldDistributionMessage::Bitfield( + relay_hash, + signed_bitfield.into(), + ); + + Versioned::V2(polkadot_node_network_protocol::v2::ValidationProtocol::BitfieldDistribution( + bitfield, + )) +} diff --git a/polkadot/node/subsystem-bench/src/lib/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs index 17c81c4fd355bd15c5e2a6c1005e90313ca80615..5725a5137ec4bb602db15f88dfce6a300f1c4bcd 100644 --- a/polkadot/node/subsystem-bench/src/lib/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -122,10 +122,10 @@ pub struct TestConfiguration { /// Randomly sampled pov_sizes #[serde(skip)] pub pov_sizes: Vec, - /// The amount of bandiwdth remote validators have. + /// The amount of bandwidth remote validators have. #[serde(default = "default_bandwidth")] pub peer_bandwidth: usize, - /// The amount of bandiwdth our node has. + /// The amount of bandwidth our node has. 
#[serde(default = "default_bandwidth")] pub bandwidth: usize, /// Optional peer emulation latency (round trip time) wrt node under test @@ -205,7 +205,7 @@ impl TestConfiguration { let peer_id_to_authority = peer_ids .iter() .zip(validator_authority_id.iter()) - .map(|(peer_id, authorithy_id)| (*peer_id, authorithy_id.clone())) + .map(|(peer_id, authority_id)| (*peer_id, authority_id.clone())) .collect(); TestAuthorities { diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs index 958ed50d0894b76ddb66978950971620dceb4aa0..42955d0302232f35e387a044e34a0c7d665512e8 100644 --- a/polkadot/node/subsystem-bench/src/lib/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -118,6 +118,7 @@ fn new_runtime() -> tokio::runtime::Runtime { .thread_name("subsystem-bench") .enable_all() .thread_stack_size(3 * 1024 * 1024) + .worker_threads(4) .build() .unwrap() } @@ -403,7 +404,7 @@ impl TestEnvironment { let total_cpu = test_env_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum"); usage.push(ResourceUsage { - resource_name: "Test environment".to_string(), + resource_name: "test-environment".to_string(), total: total_cpu, per_block: total_cpu / num_blocks, }); diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 9064f17940f004fc123bc25f24ad5de1704e2d38..fba33523be85dbfd241bc1fcf42d8f8069ebf95c 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -39,8 +39,9 @@ pub struct AvailabilityStoreState { const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; -/// Mockup helper. Contains Ccunks and full availability data of all parachain blocks +/// Mockup helper. Contains Chunks and full availability data of all parachain blocks /// used in a test. +#[derive(Clone)] pub struct NetworkAvailabilityState { pub candidate_hashes: HashMap, pub available_data: Vec, diff --git a/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs index bee15c3cefdfbda061a1313db6c631e52fd91c12..86b030fb6fdcf7248b57f7fd47cdee6085bf8377 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs @@ -89,7 +89,7 @@ impl MockChainApi { let hash = self .state .get_header_by_number(requested_number) - .expect("Unknow block number") + .expect("Unknown block number") .hash(); sender.send(Ok(Some(hash))).unwrap(); }, diff --git a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index d598f6447d3d43ece5e1d0fa6364508403c032e9..ec66ad4e279c217f7510d4c09def128f41fbffe5 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -42,8 +42,8 @@ pub struct MockNetworkBridgeTx { network: NetworkEmulatorHandle, /// A channel to the network interface, to_network_interface: UnboundedSender, - /// Test authorithies - test_authorithies: TestAuthorities, + /// Test authorities + test_authorities: TestAuthorities, } /// A mock of the network bridge tx subsystem. 
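One note on the `.worker_threads(4)` addition to `new_runtime` in `environment.rs` above: pinning the worker count makes the benchmark's CPU figures independent of the host's core count (presumably the motivation, together with the `rt-multi-thread` tokio feature enabled in the `Cargo.toml` change earlier in this diff). A standalone sketch of the same builder call, mirroring the values from the diff:

fn new_benchmark_runtime() -> tokio::runtime::Runtime {
    tokio::runtime::Builder::new_multi_thread()
        // Fixed pool size: results do not vary with the machine running the bench.
        .worker_threads(4)
        .thread_name("subsystem-bench")
        // Enable both the timer and IO drivers, which the subsystems under test need.
        .enable_all()
        // 3 MiB stacks, matching the value used by the benchmark environment.
        .thread_stack_size(3 * 1024 * 1024)
        .build()
        .expect("runtime construction should not fail")
}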
@@ -58,9 +58,9 @@ impl MockNetworkBridgeTx { pub fn new( network: NetworkEmulatorHandle, to_network_interface: UnboundedSender, - test_authorithies: TestAuthorities, + test_authorities: TestAuthorities, ) -> MockNetworkBridgeTx { - Self { network, to_network_interface, test_authorithies } + Self { network, to_network_interface, test_authorities } } } @@ -125,13 +125,13 @@ impl MockNetworkBridgeTx { } }, NetworkBridgeTxMessage::ReportPeer(_) => { - // ingore rep changes + // ignore rep changes }, NetworkBridgeTxMessage::SendValidationMessage(peers, message) => { for peer in peers { self.to_network_interface .unbounded_send(NetworkMessage::MessageFromNode( - self.test_authorithies + self.test_authorities .peer_id_to_authority .get(&peer) .unwrap() diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 53faf562f03c005238d5297c29117fd261b49eca..b73d61321cd3b274915dda37233c1945240c2b11 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -26,7 +26,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, IndexedVec, + CandidateEvent, CandidateReceipt, CoreState, GroupIndex, IndexedVec, NodeFeatures, OccupiedCore, SessionIndex, SessionInfo, ValidatorIndex, }; use sp_consensus_babe::Epoch as BabeEpoch; @@ -36,6 +36,7 @@ use std::collections::HashMap; const LOG_TARGET: &str = "subsystem-bench::runtime-api-mock"; /// Minimal state to answer requests. +#[derive(Clone)] pub struct RuntimeApiState { // All authorities in the test, authorities: TestAuthorities, @@ -49,6 +50,7 @@ pub struct RuntimeApiState { } /// A mocked `runtime-api` subsystem. +#[derive(Clone)] pub struct MockRuntimeApi { state: RuntimeApiState, config: TestConfiguration, diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs index 1bc35a014ff5bf616bb02b0f8740679ef6b67489..0f7b7d741e778e4535ccad1fe032f39d290e3ed2 100644 --- a/polkadot/node/subsystem-bench/src/lib/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -154,7 +154,7 @@ pub enum NetworkMessage { MessageFromNode(AuthorityDiscoveryId, VersionedValidationProtocol), /// A request originating from our node RequestFromNode(AuthorityDiscoveryId, Requests), - /// A request originating from an emultated peer + /// A request originating from an emulated peer RequestFromPeer(IncomingRequest), } @@ -790,9 +790,9 @@ pub fn new_network( let connected_count = config.connected_count(); - let mut peers_indicies = (0..n_peers).collect_vec(); + let mut peers_indices = (0..n_peers).collect_vec(); let (_connected, to_disconnect) = - peers_indicies.partial_shuffle(&mut thread_rng(), connected_count); + peers_indices.partial_shuffle(&mut thread_rng(), connected_count); // Node under test is always mark as disconnected. peers[NODE_UNDER_TEST as usize].disconnect(); @@ -958,7 +958,7 @@ impl Metrics { .inc_by(bytes as u64); } - /// Increment total receioved for a peer. + /// Increment total received for a peer. 
pub fn on_peer_received(&self, peer_index: usize, bytes: usize) { self.peer_total_received .with_label_values(vec![format!("node{}", peer_index).as_str()].as_slice()) @@ -1041,7 +1041,7 @@ mod tests { async fn test_expected_rate() { let tick_rate = 200; let budget = 1_000_000; - // rate must not exceeed 100 credits per second + // rate must not exceed 100 credits per second let mut rate_limiter = RateLimit::new(tick_rate, budget); let mut total_sent = 0usize; let start = Instant::now(); diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index ef60d67372ae84ed0f5a434720f21a105dee0235..59296746ec3d4154274ce68d9ee910bb61d0f9f8 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -17,6 +17,7 @@ //! Test usage implementation use colored::Colorize; +use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -37,10 +38,16 @@ impl std::fmt::Display for BenchmarkUsage { self.network_usage .iter() .map(|v| v.to_string()) + .sorted() .collect::>() .join("\n"), format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), - self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") + self.cpu_usage + .iter() + .map(|v| v.to_string()) + .sorted() + .collect::>() + .join("\n") ) } } @@ -75,6 +82,27 @@ impl BenchmarkUsage { _ => None, } } + + // Prepares a json string for a graph representation + // See: https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#examples + pub fn to_chart_json(&self) -> color_eyre::eyre::Result { + let chart = self + .network_usage + .iter() + .map(|v| ChartItem { + name: v.resource_name.clone(), + unit: "KiB".to_string(), + value: v.per_block, + }) + .chain(self.cpu_usage.iter().map(|v| ChartItem { + name: v.resource_name.clone(), + unit: "seconds".to_string(), + value: v.per_block, + })) + .collect::>(); + + Ok(serde_json::to_string(&chart)?) + } } fn check_usage( @@ -101,8 +129,8 @@ fn check_resource_usage( None } else { Some(format!( - "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {}", - resource_name, base, precision, usage.per_block + "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {} ({})", + resource_name, base, precision, usage.per_block, diff )) } } else { @@ -119,7 +147,7 @@ pub struct ResourceUsage { impl std::fmt::Display for ResourceUsage { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) + write!(f, "{:<32}{:>12.4}{:>12.4}", self.resource_name.cyan(), self.total, self.per_block) } } @@ -144,3 +172,10 @@ impl ResourceUsage { } type ResourceUsageCheck<'a> = (&'a str, f64, f64); + +#[derive(Debug, Serialize)] +pub struct ChartItem { + pub name: String, + pub unit: String, + pub value: f64, +} diff --git a/polkadot/node/subsystem-bench/src/lib/utils.rs b/polkadot/node/subsystem-bench/src/lib/utils.rs index 75b72cc11b98271f1b92e2734029b3e34d091bd6..b3cd3a88b6c1324ec18a402cbc92524f4865380f 100644 --- a/polkadot/node/subsystem-bench/src/lib/utils.rs +++ b/polkadot/node/subsystem-bench/src/lib/utils.rs @@ -16,61 +16,26 @@ //! Test utils -use crate::usage::BenchmarkUsage; -use std::io::{stdout, Write}; - -pub struct WarmUpOptions<'a> { - /// The maximum number of runs considered for marming up. 
- pub warm_up: usize, - /// The number of runs considered for benchmarking. - pub bench: usize, - /// The difference in CPU usage between runs considered as normal - pub precision: f64, - /// The subsystems whose CPU usage is checked during warm-up cycles - pub subsystems: &'a [&'a str], -} - -impl<'a> WarmUpOptions<'a> { - pub fn new(subsystems: &'a [&'a str]) -> Self { - Self { warm_up: 100, bench: 3, precision: 0.02, subsystems } - } -} - -pub fn warm_up_and_benchmark( - options: WarmUpOptions, - run: impl Fn() -> BenchmarkUsage, -) -> Result { - println!("Warming up..."); - let mut usages = Vec::with_capacity(options.bench); - - for n in 1..=options.warm_up { - let curr = run(); - if let Some(prev) = usages.last() { - let diffs = options - .subsystems - .iter() - .map(|&v| { - curr.cpu_usage_diff(prev, v) - .ok_or(format!("{} not found in benchmark {:?}", v, prev)) - }) - .collect::, String>>()?; - if !diffs.iter().all(|&v| v < options.precision) { - usages.clear(); - } - } - usages.push(curr); - print!("\r{}%", n * 100 / options.warm_up); - if usages.len() == options.bench { - println!("\rTook {} runs to warm up", n.saturating_sub(options.bench)); - break; - } - stdout().flush().unwrap(); - } - - if usages.len() != options.bench { - println!("Didn't warm up after {} runs", options.warm_up); - return Err("Can't warm up".to_string()) +use std::{fs::File, io::Write}; + +// Saves a given string to a file +pub fn save_to_file(path: &str, value: String) -> color_eyre::eyre::Result<()> { + let output = std::process::Command::new(env!("CARGO")) + .arg("locate-project") + .arg("--workspace") + .arg("--message-format=plain") + .output() + .unwrap() + .stdout; + let workspace_dir = std::path::Path::new(std::str::from_utf8(&output).unwrap().trim()) + .parent() + .unwrap(); + let path = workspace_dir.join(path); + if let Some(dir) = path.parent() { + std::fs::create_dir_all(dir)?; } + let mut file = File::create(path)?; + file.write_all(value.as_bytes())?; - Ok(BenchmarkUsage::average(&usages)) + Ok(()) } diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml index c71f030568d9da378da61c7c4af90a409f936b64..57678e8e8d4a1057ec843f097c2cd9811f33a6bb 100644 --- a/polkadot/node/subsystem-test-helpers/Cargo.toml +++ b/polkadot/node/subsystem-test-helpers/Cargo.toml @@ -11,8 +11,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" parking_lot = "0.12.1" polkadot-node-subsystem = { path = "../subsystem" } polkadot-erasure-coding = { path = "../../erasure-coding" } diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 54c8f7e2ade773f1df84fc8cef1825c266364633..1019077638774ca62bdb370b3ed9425254935d90 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] derive_more = "0.99.17" -futures = "0.3.21" +futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } @@ -29,5 +29,5 @@ sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/a smallvec = "1.8.0" substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } thiserror = { workspace = true } -async-trait = "0.1.74" +async-trait = "0.1.79" bitvec = { version = "1.0.0", default-features = false, 
features = ["alloc"] } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 5115efa853c185c508b0ec01e7020aa790018ba7..2ca6728af012166649ccfee136b098cff9b9fe4a 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -42,16 +42,14 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - async_backing, slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreIndex, CoreState, - DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, HeadData, - Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - PvfExecKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, - SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BackedCandidate, + BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, PvfExecKind, SessionIndex, SessionInfo, + SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -82,8 +80,15 @@ pub struct CanSecondRequest { pub enum CandidateBackingMessage { /// Requests a set of backable candidates attested by the subsystem. /// - /// Each pair is (candidate_hash, candidate_relay_parent). - GetBackedCandidates(Vec<(CandidateHash, Hash)>, oneshot::Sender>), + /// The order of candidates of the same para must be preserved in the response. + /// If a backed candidate of a para cannot be retrieved, the response should not contain any + /// candidates of the same para that follow it in the input vector. In other words, assuming + /// candidates are supplied in dependency order, we must ensure that this dependency order is + /// preserved. + GetBackedCandidates( + HashMap>, + oneshot::Sender>>, + ), /// Request the subsystem to check whether it's allowed to second given candidate. /// The rule is to only fetch collations that are either built on top of the root /// of some fragment tree or have a parent node which represents backed candidate. @@ -221,6 +226,8 @@ pub enum CollatorProtocolMessage { /// The result sender should be informed when at least one parachain validator seconded the /// collation. It is also completely okay to just drop the sender. result_sender: Option>, + /// The core index where the candidate should be backed. + core_index: CoreIndex, }, /// Report a collator as having provided an invalid collation. This should lead to disconnect /// and blacklist of the collator. @@ -798,7 +805,7 @@ pub enum StatementDistributionMessage { /// This data becomes intrinsics or extrinsics which should be included in a future relay chain /// block. 
-// It needs to be cloneable because multiple potential block authors can request copies. +// It needs to be clonable because multiple potential block authors can request copies. #[derive(Debug, Clone)] pub enum ProvisionableData { /// This bitfield indicates the availability of various candidate blocks. diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7474b4120cc918b8d3a880bbd09ab022b6a4445c..664d10ed1af5fdb77c3105ab995ab5ff89df2220 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,16 +16,12 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, - runtime_api::ParachainHost, - slashing, - vstaging::{self, ApprovalVotingParams}, - Block, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Header, Id, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, runtime_api::ParachainHost, slashing, ApprovalVotingParams, Block, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Header, Id, + InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use sc_client_api::{AuxStore, HeaderBackend}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -324,7 +320,7 @@ pub trait RuntimeApiSubsystemClient { // === v9 === /// Get the node features. 
- async fn node_features(&self, at: Hash) -> Result<vstaging::NodeFeatures, ApiError>; + async fn node_features(&self, at: Hash) -> Result<NodeFeatures, ApiError>; // == v10: Approval voting params == /// Approval voting configuration parameters @@ -586,7 +582,7 @@ where self.client.runtime_api().async_backing_params(at) } - async fn node_features(&self, at: Hash) -> Result<vstaging::NodeFeatures, ApiError> { + async fn node_features(&self, at: Hash) -> Result<NodeFeatures, ApiError> { self.client.runtime_api().node_features(at) } diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index a668f8de76a0bdf15b1e4498924279b5fc467d3e..79a6a75e4cdfb5cf644dc8f963e51d0ef49c1fcd 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -10,8 +10,8 @@ license.workspace = true workspace = true [dependencies] -async-trait = "0.1.74" -futures = "0.3.21" +async-trait = "0.1.79" +futures = "0.3.30" futures-channel = "0.3.23" itertools = "0.10" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -44,8 +44,8 @@ parity-db = { version = "0.4.12" } [dev-dependencies] assert_matches = "1.4.0" -env_logger = "0.9.0" -futures = { version = "0.3.21", features = ["thread-pool"] } +env_logger = "0.11" +futures = { version = "0.3.30", features = ["thread-pool"] } log = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } lazy_static = "1.4.0" diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index c7b91bffb3d705b27ee4c611985328dd1b1b7e77..d38d838fedefac643253528ab99d0035eeae55da 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -78,7 +78,7 @@ /// /// 2. The root fragment is invalid under the new constraints because it has been subsumed by /// the relay-chain. In this case, we can discard the root and split & re-root the fragment -/// tree under its descendents and compare to the new constraints again. This is the +/// tree under its descendants and compare to the new constraints again. This is the /// "prediction came true" case. /// /// 3. The root fragment is invalid under the new constraints because a competing parachain diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index aaae30db50cdb532bad58bb500b0ba93400f77f7..83b046f0bf0ac54533958da36dc230e8c8ce2922 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem::{ messages::{RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender}, overseer, SubsystemSender, }; -use polkadot_primitives::{slashing, CoreIndex, ExecutorParams}; +use polkadot_primitives::{async_backing::BackingState, slashing, CoreIndex, ExecutorParams}; pub use overseer::{ gen::{OrchestraError as OverseerError, Timeout}, @@ -308,6 +308,7 @@ specialize_requests! { fn request_disabled_validators() -> Vec<ValidatorIndex>; DisabledValidators; fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams; fn request_claim_queue() -> BTreeMap<CoreIndex, VecDeque<ParaId>>; ClaimQueue; + fn request_para_backing_state(para_id: ParaId) -> Option<BackingState>; ParaBackingState; } /// Requests executor parameters from the runtime effective at given relay-parent. First obtains @@ -382,7 +383,7 @@ pub fn signing_key_and_index<'a>( /// Sign the given data with the given validator ID. 
/// -/// Returns `Ok(None)` if the private key that correponds to that validator ID is not found in the +/// Returns `Ok(None)` if the private key that corresponds to that validator ID is not found in the /// given keystore. Returns an error if the key could not be used for signing. pub fn sign( keystore: &KeystorePtr, diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 481625acb321994c58482c14aaf2860f092ae0c3..714384b32e37bcbd6ba7f077252fa23f50eac490 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,12 +30,11 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - slashing, - vstaging::{node_features::FeatureIndex, NodeFeatures}, - AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, ExecutorParams, - GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, + node_features::FeatureIndex, slashing, AsyncBackingParams, CandidateEvent, CandidateHash, + CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, + NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; use crate::{ diff --git a/polkadot/node/subsystem-util/src/vstaging.rs b/polkadot/node/subsystem-util/src/vstaging.rs index 3e807eff5387693bc00198a3be5f257778bea0f2..b166a54f75c46ebfe4dd8d06af32a3dea3495b7a 100644 --- a/polkadot/node/subsystem-util/src/vstaging.rs +++ b/polkadot/node/subsystem-util/src/vstaging.rs @@ -19,14 +19,45 @@ //! This module is intended to contain common boiler plate code handling unreleased runtime API //! calls. +use std::collections::{BTreeMap, VecDeque}; + use polkadot_node_subsystem_types::messages::{RuntimeApiMessage, RuntimeApiRequest}; use polkadot_overseer::SubsystemSender; -use polkadot_primitives::{Hash, ValidatorIndex}; +use polkadot_primitives::{CoreIndex, Hash, Id as ParaId, ValidatorIndex}; -use crate::{has_required_runtime, request_disabled_validators, runtime}; +use crate::{has_required_runtime, request_claim_queue, request_disabled_validators, runtime}; const LOG_TARGET: &'static str = "parachain::subsystem-util-vstaging"; +/// A snapshot of the runtime claim queue at an arbitrary relay chain block. +#[derive(Default)] +pub struct ClaimQueueSnapshot(BTreeMap<CoreIndex, VecDeque<ParaId>>); + +impl From<BTreeMap<CoreIndex, VecDeque<ParaId>>> for ClaimQueueSnapshot { + fn from(claim_queue_snapshot: BTreeMap<CoreIndex, VecDeque<ParaId>>) -> Self { + ClaimQueueSnapshot(claim_queue_snapshot) + } +} + +impl ClaimQueueSnapshot { + /// Returns the `ParaId` that has a claim for `core_index` at the specified `depth` in the + /// claim queue. A depth of `0` means the very next block. + pub fn get_claim_for(&self, core_index: CoreIndex, depth: usize) -> Option<ParaId> { + self.0.get(&core_index)?.get(depth).copied() + } + + /// Returns an iterator over all claimed cores and the claiming `ParaId` at the specified + /// `depth` in the claim queue. 
+ pub fn iter_claims_at_depth( + &self, + depth: usize, + ) -> impl Iterator<Item = (CoreIndex, ParaId)> + '_ { + self.0 + .iter() + .filter_map(move |(core_index, paras)| Some((*core_index, *paras.get(depth)?))) + } +} + // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940 /// Returns disabled validators list if the runtime supports it. Otherwise logs a debug messages and /// returns an empty vec. @@ -54,3 +85,28 @@ pub async fn get_disabled_validators_with_fallback<Sender: SubsystemSender<RuntimeApiMessage>>( +pub async fn fetch_claim_queue( + sender: &mut impl SubsystemSender<RuntimeApiMessage>, + relay_parent: Hash, +) -> Result<Option<ClaimQueueSnapshot>, runtime::Error> { + if has_required_runtime( + sender, + relay_parent, + RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT, + ) + .await + { + let res = request_claim_queue(relay_parent, sender) + .await + .await + .map_err(runtime::Error::RuntimeRequestCanceled)??; + Ok(Some(res.into())) + } else { + gum::trace!(target: LOG_TARGET, "Runtime doesn't support `request_claim_queue`"); + Ok(None) + } +} diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 36748c3b455b90315ab445c7c8b612b5a0d4ab0b..7db00404eb8eca049a6194d19e5e92e66325628c 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -38,7 +38,7 @@ frame-benchmarking = { path = "../../../../substrate/frame/benchmarking" } [dev-dependencies] sp-keyring = { path = "../../../../substrate/primitives/keyring" } -futures = "0.3.21" +futures = "0.3.30" [features] runtime-benchmarks = [ diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index e7892abcd87bf09140b53f5f123798b2ccce7ac4..48a206f23c660ca9dbb5e0dd48b1cf0f4835b183 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -10,13 +10,13 @@ license.workspace = true workspace = true [dependencies] -futures = "0.3.21" +futures = "0.3.30" hex = "0.4.3" gum = { package = "tracing-gum", path = "../../gum" } rand = "0.8.5" serde_json = { workspace = true, default-features = true } tempfile = "3.2.0" -tokio = "1.24.2" +tokio = "1.37" # Polkadot dependencies polkadot-overseer = { path = "../../overseer" } @@ -63,7 +63,7 @@ substrate-test-client = { path = "../../../../substrate/test-utils/client" } [dev-dependencies] pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } substrate-test-utils = { path = "../../../../substrate/test-utils" } -tokio = { version = "1.24.2", features = ["macros"] } +tokio = { version = "1.37", features = ["macros"] } [features] runtime-metrics = ["polkadot-test-runtime/runtime-metrics"] diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index d8c3cea7ad8b16062f346c12842d5f652936797c..15eea2addc893ced551edaf2557ff703a455a879 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -14,7 +14,7 @@ workspace = true # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. 
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } sp-core = { path = "../../substrate/primitives/core", default-features = false, features = ["serde"] } diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index cb283c271191f80a0f29275fa44eb883586aeb8b..5a2b5405741446f52e5f1212de2665ab04d350d2 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -17,14 +17,14 @@ path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" log = { workspace = true, default-features = true } test-parachain-adder = { path = ".." } polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 238b98a66801c7b6ec9ef1bda9248621a5f4332c..cacf7304f90a29d39848b80a58159afbbb6cb3e5 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -17,14 +17,14 @@ path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" log = { workspace = true, default-features = true } test-parachain-undying = { path = ".." 
} polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index e63fb621c7880c03fe0282a5500df69d20528f99..004fa62acf34595c548e0e36da1d05f0a52abb53 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } log = { workspace = true, default-features = false } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 745195ce092ab1e4678b54e594901275260dc7da..d4eeb3cc3d2951089213b924777cbcd9a979c1c9 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] // `v6` is currently the latest stable version of the runtime API. -pub mod v6; +pub mod v7; // The 'staging' version is special - it contains primitives which are // still in development. Once they are considered stable, they will be @@ -33,26 +33,27 @@ pub mod runtime_api; // Current primitives not requiring versioning are exported here. // Primitives requiring versioning must not be exported and must be referred by an exact version. 
-pub use v6::{ +pub use v7::{ async_backing, byzantine_threshold, check_candidate_backing, collator_signature_payload, - effective_minimum_backing_votes, executor_params, metric_definitions, slashing, + effective_minimum_backing_votes, executor_params, metric_definitions, node_features, slashing, supermajority_threshold, well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, - AccountId, AccountIndex, AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, - AuthorityDiscoveryId, AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, - BlockId, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, - CoreIndex, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, - EncodeAs, ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, + AccountId, AccountIndex, AccountPublic, ApprovalVote, ApprovalVoteMultipleCandidates, + ApprovalVotingParams, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, + AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, + CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, + CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, + CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, + ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, - InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, - OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, - PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, - RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, - RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature, - Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement, - SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, + InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, + Nonce, OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, + ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, + RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, + RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, + SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, + SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, @@ -63,4 +64,4 @@ pub use v6::{ }; #[cfg(feature = "std")] -pub use v6::{AssignmentPair, CollatorPair, ValidatorPair}; +pub use 
v7::{AssignmentPair, CollatorPair, ValidatorPair}; diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 6dca33f88234bfb064ade5f519dec378dc01ce71..f611936f2701d367283503e761176e9b61e72cbe 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,13 +114,11 @@ //! separated from the stable primitives. use crate::{ - async_backing, slashing, - vstaging::{self, ApprovalVotingParams}, - AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, ApprovalVotingParams, AsyncBackingParams, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, + CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, NodeFeatures, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, + SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_core_primitives as pcp; @@ -279,7 +277,7 @@ sp_api::decl_runtime_apis! { /// Get node features. /// This is a staging method! Do not use on production runtimes! #[api_version(9)] - fn node_features() -> vstaging::NodeFeatures; + fn node_features() -> NodeFeatures; /***** Added in v10 *****/ /// Approval voting configuration parameters diff --git a/polkadot/primitives/src/v6/async_backing.rs b/polkadot/primitives/src/v7/async_backing.rs similarity index 100% rename from polkadot/primitives/src/v6/async_backing.rs rename to polkadot/primitives/src/v7/async_backing.rs diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v7/executor_params.rs similarity index 100% rename from polkadot/primitives/src/v6/executor_params.rs rename to polkadot/primitives/src/v7/executor_params.rs diff --git a/polkadot/primitives/src/v6/metrics.rs b/polkadot/primitives/src/v7/metrics.rs similarity index 100% rename from polkadot/primitives/src/v6/metrics.rs rename to polkadot/primitives/src/v7/metrics.rs diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v7/mod.rs similarity index 97% rename from polkadot/primitives/src/v6/mod.rs rename to polkadot/primitives/src/v7/mod.rs index 9e7f910314cc3ae5db5c260ca5ba4a9f873a4d47..d4f4a6335772e2b7d5aed5f5aef8310e828c36ce 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v7/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. -//! `V6` Primitives. +//! `V7` Primitives. use bitvec::{field::BitField, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; @@ -1184,6 +1184,32 @@ impl<'a> ApprovalVoteMultipleCandidates<'a> { } } +/// Approval voting configuration parameters +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] +pub struct ApprovalVotingParams { + /// The maximum number of candidates `approval-voting` can vote for with + /// a single signature. + /// + /// Setting it to 1 means we send the approval as soon as we have it available. 
+ pub max_approval_coalesce_count: u32, +} + +impl Default for ApprovalVotingParams { + fn default() -> Self { + Self { max_approval_coalesce_count: 1 } + } +} + /// Custom validity errors used in Polkadot while validating transactions. #[repr(u8)] pub enum ValidityError { @@ -1457,7 +1483,7 @@ pub enum ValidDisputeStatementKind { #[codec(index = 3)] ApprovalChecking, /// An approval vote from the new version. - /// We can't create this version untill all nodes + /// We can't create this version until all nodes /// have been updated to support it and max_approval_coalesce_count /// is set to more than 1. #[codec(index = 4)] @@ -1604,7 +1630,7 @@ impl ValidityAttestation { pub fn to_compact_statement(&self, candidate_hash: CandidateHash) -> CompactStatement { // Explicit and implicit map directly from // `ValidityVote::Valid` and `ValidityVote::Issued`, and hence there is a - // `1:1` relationshow which enables the conversion. + // `1:1` relationship which enables the conversion. match *self { ValidityAttestation::Implicit(_) => CompactStatement::Seconded(candidate_hash), ValidityAttestation::Explicit(_) => CompactStatement::Valid(candidate_hash), @@ -1947,6 +1973,29 @@ pub enum PvfExecKind { Approval, } +/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. +pub type NodeFeatures = BitVec<u8, bitvec::order::Lsb0>; + +/// Module containing feature-specific bit indices into the `NodeFeatures` bitvec. +pub mod node_features { + /// A feature index used to identify a bit into the node_features array stored + /// in the HostConfiguration. + #[repr(u8)] + pub enum FeatureIndex { + /// Tells if tranche 0 assignments could be sent in a single certificate. + /// Reserved for: `` + EnableAssignmentsV2 = 0, + /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits. + /// The value stored there represents the assumed core index where the candidates + /// are backed. This is needed for the elastic scaling MVP. + ElasticScalingMVP = 1, + /// First unassigned feature bit. + /// Every time a new feature flag is assigned it should take this value, + /// and this should be incremented. + FirstUnassigned = 2, + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/polkadot/primitives/src/v6/signed.rs b/polkadot/primitives/src/v7/signed.rs similarity index 100% rename from polkadot/primitives/src/v6/signed.rs rename to polkadot/primitives/src/v7/signed.rs diff --git a/polkadot/primitives/src/v6/slashing.rs b/polkadot/primitives/src/v7/slashing.rs similarity index 100% rename from polkadot/primitives/src/v6/slashing.rs rename to polkadot/primitives/src/v7/slashing.rs diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index bd2a5106c44de5895fb327957161c84f13a983b0..1af73993f640cb8d28bc5145ffb797ab0619d16c 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,7 +17,7 @@ //! Staging Primitives. 
// Put any primitives used by staging APIs functions here -pub use crate::v6::*; +use crate::v7::*; use sp_std::prelude::*; use parity_scale_codec::{Decode, Encode}; @@ -25,32 +25,6 @@ use primitives::RuntimeDebug; use scale_info::TypeInfo; use sp_arithmetic::Perbill; -/// Approval voting configuration parameters -#[derive( - RuntimeDebug, - Copy, - Clone, - PartialEq, - Encode, - Decode, - TypeInfo, - serde::Serialize, - serde::Deserialize, -)] -pub struct ApprovalVotingParams { - /// The maximum number of candidates `approval-voting` can vote for with - /// a single signatures. - /// - /// Setting it to 1, means we send the approval as soon as we have it available. - pub max_approval_coalesce_count: u32, -} - -impl Default for ApprovalVotingParams { - fn default() -> Self { - Self { max_approval_coalesce_count: 1 } - } -} - /// Scheduler configuration parameters. All coretime/ondemand parameters are here. #[derive( RuntimeDebug, @@ -102,7 +76,7 @@ pub struct SchedulerParams<BlockNumber> { pub on_demand_fee_variability: Perbill, /// The minimum amount needed to claim a slot in the spot pricing queue. pub on_demand_base_fee: Balance, - /// The number of blocks a claim stays in the scheduler's claimqueue before getting cleared. + /// The number of blocks a claim stays in the scheduler's claim queue before getting cleared. /// This number should go reasonably higher than the number of blocks in the async backing /// lookahead. pub ttl: BlockNumber, @@ -125,28 +99,3 @@ impl<BlockNumber: Default + From<u32>> Default for SchedulerParams<BlockNumber> } } } - -use bitvec::vec::BitVec; - -/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. -pub type NodeFeatures = BitVec<u8, bitvec::order::Lsb0>; - -/// Module containing feature-specific bit indices into the `NodeFeatures` bitvec. -pub mod node_features { - /// A feature index used to indentify a bit into the node_features array stored - /// in the HostConfiguration. - #[repr(u8)] - pub enum FeatureIndex { - /// Tells if tranch0 assignments could be sent in a single certificate. - /// Reserved for: `` - EnableAssignmentsV2 = 0, - /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits. - /// The value stored there represents the assumed core index where the candidates - /// are backed. This is needed for the elastic scaling MVP. - ElasticScalingMVP = 1, - /// First unassigned feature bit. - /// Every time a new feature flag is assigned it should take this value. - /// and this should be incremented. 
- FirstUnassigned = 2, - } -} diff --git a/polkadot/primitives/test-helpers/src/lib.rs b/polkadot/primitives/test-helpers/src/lib.rs index b7c5d82341a07f183a83ef3351fd66c149aaa99d..d43cf3317e573adffb21b8fdc077eeebf993d40d 100644 --- a/polkadot/primitives/test-helpers/src/lib.rs +++ b/polkadot/primitives/test-helpers/src/lib.rs @@ -249,7 +249,7 @@ pub fn resign_candidate_descriptor_with_collator<H: AsRef<[u8]>>( descriptor.signature = signature; } -/// Extracts validators's public keus (`ValidatorId`) from `Sr25519Keyring` +/// Extracts validators' public keys (`ValidatorId`) from `Sr25519Keyring` pub fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> { val_ids.iter().map(|v| v.public().into()).collect() } diff --git a/polkadot/roadmap/implementers-guide/src/disputes-flow.md b/polkadot/roadmap/implementers-guide/src/disputes-flow.md index b5cc5611c6ff897826e128d240c38761991e0a5c..540b3c45bad4b3328437ad5e6177227ef13a4f22 100644 --- a/polkadot/roadmap/implementers-guide/src/disputes-flow.md +++ b/polkadot/roadmap/implementers-guide/src/disputes-flow.md @@ -74,7 +74,7 @@ The set of validators eligible to vote consists of the validators that had duty votes by the backing validators. If a validator receives an initial dispute message (a set of votes where there are at least two opposing votes -contained), and the PoV or Code are hence not reconstructable from local storage, that validator must request the +contained), and the PoV or Code are hence not reconstructible from local storage, that validator must request the required data from its peers. The dispute availability message must contain code, persisted validation data, and the proof of validity. diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md index ce71de6f76b8f8550e815849ba80002f890a9b85..c987b7fe5beaa366c23c2fc58a179a916bbb8a21 100644 --- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-distribution.md @@ -101,7 +101,7 @@ struct State { } enum MessageFingerprint { - Assigment(Hash, u32, ValidatorIndex), + Assignment(Hash, u32, ValidatorIndex), Approval(Hash, u32, ValidatorIndex), } @@ -203,7 +203,7 @@ For all peers: * Compute `view_intersection` as the intersection of the peer's view blocks with the hashes of the new blocks. * Invoke `unify_with_peer(peer, view_intersection)`. -#### `ApprovalDistributionMessage::DistributeAsignment` +#### `ApprovalDistributionMessage::DistributeAssignment` Call `import_and_circulate_assignment` with `MessageSource::Local`. diff --git a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md index e3bb14db3a55499dfc1546494c8e83d5e6cb6ed7..c57c4589244e79f6cd54a87f823abc466ed8eb43 100644 --- a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md +++ b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md @@ -115,7 +115,7 @@ On `Conclude`, shut down the subsystem. Launch the source as a background task running `run(recovery_task)`. -#### `run(recovery_task) -> Result` +#### `run(recovery_task) -> Result` ```rust // How many parallel requests to have going at once. 
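The `NodeFeatures` bitvec and `node_features::FeatureIndex` indices moved above from `vstaging` into `v7` are consumed by testing individual bits. A minimal sketch of such a check, assuming the crate-root re-exports shown earlier (the helper and the standalone `main` are illustrative, not part of the diff):

```rust
use polkadot_primitives::{node_features::FeatureIndex, NodeFeatures};

/// Returns true if the elastic-scaling feature bit is set; a bitvec
/// shorter than the index counts as the feature being disabled.
fn elastic_scaling_enabled(features: &NodeFeatures) -> bool {
	features
		.get(FeatureIndex::ElasticScalingMVP as usize)
		.map(|bit| *bit)
		.unwrap_or(false)
}

fn main() {
	// An empty bitvec: no features enabled.
	let mut features = NodeFeatures::new();
	assert!(!elastic_scaling_enabled(&features));

	// Grow the bitvec and flip the elastic-scaling bit on.
	features.resize(FeatureIndex::FirstUnassigned as usize, false);
	features.set(FeatureIndex::ElasticScalingMVP as usize, true);
	assert!(elastic_scaling_enabled(&features));
}
```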
diff --git a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index e0738e219d1b6f02e20d5bd360b9ed9f72b3cbd7..90b29249f3e5bdf2170d0f56e95e99db1cfd1b83 100644 --- a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -76,7 +76,7 @@ dispute is raised reconsider their vote and send an explicit invalid vote. If th recorded, then they could avoid a slash. This is not a problem for our basic security assumptions: The backers are the ones to be supposed to have skin in the -game, so we are not too woried about colluding approval voters getting away slash free as the gambler's ruin property is +game, so we are not too worried about colluding approval voters getting away slash free as the gambler's ruin property is maintained anyway. There is however a separate problem, from colluding approval-voters, that is "lazy" approval voters. If it were easy and reliable for approval-voters to reconsider their vote, in case of an actual dispute, then they don't have a direct incentive (apart from playing a part in securing the network) to properly run the validation function at @@ -331,13 +331,13 @@ off-chain. The reason for this is a dispute can be raised for a candidate in a p validator that is going to be slashed for it might not even be in the current active set. That means it can't be disabled on-chain. We need a way to prevent someone from disputing all valid candidates in the previous era. We do this by keeping track of the validators who lost a dispute in the past few sessions and use that list in addition to the -on-chain disabled validators state. In addition to past session misbehavior, this also heps in case a slash is delayed. +on-chain disabled validators state. In addition to past session misbehavior, this also helps in case a slash is delayed. When we receive a dispute statements set, we do the following: 1. Take the on-chain state of disabled validators at the relay parent block. 1. Take a list of those who lost a dispute in that session in the order that prioritizes the biggest and newest offence. 1. Combine the two lists and take the first byzantine threshold validators from it. -1. If the dispute is unconfimed, check if all votes against the candidate are from disabled validators. +1. If the dispute is unconfirmed, check if all votes against the candidate are from disabled validators. If so, we don't participate in the dispute, but record the votes. ### Backing Votes @@ -591,7 +591,7 @@ Initiates processing via the `Participation` module and updates the internal sta ### On `MuxedMessage::Participation` -This message is sent from `Participatuion` module and indicates a processed dispute participation. It's the result of +This message is sent from `Participation` module and indicates a processed dispute participation. It's the result of the processing job initiated with `OverseerSignal::ActiveLeaves`. The subsystem issues a `DisputeMessage` with the result. 
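The disabled-validator handling described in the dispute-coordinator hunk above combines two lists and caps the result at the byzantine threshold. A hedged sketch of that combination step, using `byzantine_threshold` from `polkadot-primitives` (the function and parameter names are illustrative, not the subsystem's actual API):

```rust
use std::collections::HashSet;

use polkadot_primitives::{byzantine_threshold, ValidatorIndex};

/// Merge on-chain disabled validators with recent off-chain dispute losers
/// (already ordered by offence size and recency), keeping at most the
/// byzantine threshold of entries, as the guide describes.
fn combined_disabled_validators(
	onchain_disabled: Vec<ValidatorIndex>,
	offchain_dispute_losers: Vec<ValidatorIndex>,
	n_validators: usize,
) -> Vec<ValidatorIndex> {
	let mut seen = HashSet::new();
	let mut combined: Vec<ValidatorIndex> = onchain_disabled
		.into_iter()
		// The on-chain state comes first, so it wins within the capped window.
		.chain(offchain_dispute_losers)
		.filter(|v| seen.insert(*v))
		.collect();
	combined.truncate(byzantine_threshold(n_validators));
	combined
}
```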
diff --git a/polkadot/roadmap/implementers-guide/src/node/overseer.md b/polkadot/roadmap/implementers-guide/src/node/overseer.md index 53a1153081013b684d0cab419fb3eaa0be5a15c1..960539b84998455ab52350a78f432dd9d399ab89 100644 --- a/polkadot/roadmap/implementers-guide/src/node/overseer.md +++ b/polkadot/roadmap/implementers-guide/src/node/overseer.md @@ -108,11 +108,11 @@ way that the receiving subsystem can further address the communication to one of This communication prevents a certain class of race conditions. When the Overseer determines that it is time for subsystems to begin working on top of a particular relay-parent, it will dispatch a `ActiveLeavesUpdate` message to all subsystems to do so, and those messages will be handled asynchronously by those subsystems. Some subsystems will receive -those messsages before others, and it is important that a message sent by subsystem A after receiving +those messages before others, and it is important that a message sent by subsystem A after receiving `ActiveLeavesUpdate` message will arrive at subsystem B after its `ActiveLeavesUpdate` message. If subsystem A maintained an independent channel with subsystem B to communicate, it would be possible for subsystem B to handle the side message before the `ActiveLeavesUpdate` message, but it wouldn't have any logical course of action to take with the -side message - leading to it being discarded or improperly handled. Well-architectured state machines should have a +side message - leading to it being discarded or improperly handled. Well-architected state machines should have a single source of inputs, so that is what we do here. One exception is reasonable to make for responses to requests. A request should be made via the overseer in order to diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md index 7f6fef7ddf631f10eadd7c1cc5f4d7d4b7f9cd04..f8e88a67fcb813cae22fc43abd273f020b91620f 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md @@ -8,7 +8,7 @@ pre-checking. Head over to [overview] for the PVF pre-checking process overview. There is no dedicated input mechanism for PVF pre-checker. Instead, PVF pre-checker looks on the `ActiveLeavesUpdate` event stream for work. -This subsytem does not produce any output messages either. The subsystem will, however, send messages to the +This subsystem does not produce any output messages either. The subsystem will, however, send messages to the [Runtime API] subsystem to query for the pending PVFs and to submit votes. In addition to that, it will also communicate with [Candidate Validation] Subsystem to request PVF pre-check. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/README.md b/polkadot/roadmap/implementers-guide/src/runtime/README.md index 459f0e6b69d98bddb19a9ccea85830ed8dc01150..10eedb49ec38233af5b17992134f1f14dfcbaa74 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/README.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/README.md @@ -89,7 +89,7 @@ struct SessionChangeNotification { prev_config: HostConfiguration, // The configuration after handling the session change. new_config: HostConfiguration, - // A secure randomn seed for the session, gathered from BABE. + // A secure random seed for the session, gathered from BABE. random_seed: [u8; 32], // The session index of the beginning session. 
session_index: SessionIndex, diff --git a/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md b/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md index 69d33ca8670db942ecd8f5cb564f8d973ef6e47b..ed765634b592cdabf0b1f971ce34e7edbc9f069c 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/hrmp.md @@ -62,7 +62,7 @@ HRMP related storage layout HrmpOpenChannelRequests: map HrmpChannelId => Option<HrmpOpenChannelRequest>; HrmpOpenChannelRequestsList: Vec<HrmpChannelId>; -/// This mapping tracks how many open channel requests are inititated by a given sender para. +/// This mapping tracks how many open channel requests are initiated by a given sender para. /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has `(X, _)` /// as the number of `HrmpOpenChannelRequestCount` for `X`. HrmpOpenChannelRequestCount: map ParaId => u32; @@ -233,7 +233,7 @@ executed the message. 1. Send a downward message to the opposite party notifying about the channel closing. * The DM is sent using `queue_downward_message`. * The DM is represented by the `HrmpChannelClosing` XCM message with: - * `initator` is set to `origin`, + * `initiator` is set to `origin`, * `sender` is set to `ch.sender`, * `recipient` is set to `ch.recipient`. * The opposite party is `ch.sender` if `origin` is `ch.recipient` and `ch.recipient` if `origin` is `ch.sender`. diff --git a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md index 1345f0eea95e2ed007ff5b23ec4a85144b5d6893..7972c706b9ee1ebce2132a49ccf62a194d6c1e58 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/parainherent.md @@ -88,7 +88,7 @@ to `sanitize_bitfields` function for implementation details. Backed candidates sanitization removes malformed ones, candidates which have got concluded invalid disputes against them or candidates produced by unassigned cores. Furthermore any backing votes from disabled validators for a candidate are dropped. This is part of the validator disabling strategy. After filtering the statements from disabled validators a -backed candidate may end up with votes count less than `minimum_backing_votes` (a parameter from `HostConfiguiration`). +backed candidate may end up with votes count less than `minimum_backing_votes` (a parameter from `HostConfiguration`). In this case the whole candidate is dropped otherwise it will be rejected by `process_candidates` from pallet inclusion. All checks related to backed candidates are implemented in `sanitize_backed_candidates` and `filter_backed_statements_from_disabled_validators`. diff --git a/polkadot/roadmap/implementers-guide/src/types/approval.md b/polkadot/roadmap/implementers-guide/src/types/approval.md index c19ffa53762a5fc9e2ac0766604efc22cf20ef4d..29d973ca0ab8c3717bfbaa1077bd5f9873cf7a94 100644 --- a/polkadot/roadmap/implementers-guide/src/types/approval.md +++ b/polkadot/roadmap/implementers-guide/src/types/approval.md @@ -39,7 +39,7 @@ enum AssignmentCertKindV2 { /// The core index chosen in this cert. core_index: CoreIndex, }, - /// Deprectated assignment. Soon to be removed. + /// Deprecated assignment. Soon to be removed. /// /// An assignment story based on the VRF that authorized the relay-chain block where the /// candidate was included combined with a sample number. 
@@ -117,7 +117,7 @@ struct IndirectSignedApprovalVote { ## `CheckedAssignmentCert` An assignment cert which has checked both the VRF and the validity of the implied assignment according to the selection -criteria rules of the protocol. This type should be declared in such a way as to be instantiatable only when the checks +criteria rules of the protocol. This type should be declared in such a way as to be instantiable only when the checks have actually been done. Fields should be accessible via getters, not direct struct access. ```rust diff --git a/polkadot/roadmap/implementers-guide/src/types/disputes.md b/polkadot/roadmap/implementers-guide/src/types/disputes.md index c49e0fea262510ef2e1585f9aa8433dc02883b50..ac09084c48a2f3170939387473ed4c772ee1dcf1 100644 --- a/polkadot/roadmap/implementers-guide/src/types/disputes.md +++ b/polkadot/roadmap/implementers-guide/src/types/disputes.md @@ -82,7 +82,7 @@ struct DisputeState { struct ScrapedOnChainVotes { /// The session index at which the block was included. session: SessionIndex, - /// The backing and seconding validity attestations for all candidates, provigind the full candidate receipt. + /// The backing and seconding validity attestations for all candidates, providing the full candidate receipt. backing_validators_per_candidate: Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)> /// Set of concluded disputes that were recorded /// on chain within the inherent. diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index 54cdc2edd12d2537c1b80d9f2e973562375fc150..e011afb97089aaf59d685b8d4bf9998d21390146 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -22,7 +22,7 @@ All subsystems have their own message types; all of them need to be able to list are currently two proposals for how to handle that with unified communication channels: 1. Retaining the `OverseerSignal` definition above, add `enum FromOrchestra<T> {Signal(OverseerSignal), Message(T)}`. -1. Add a generic varint to `OverseerSignal`: `Message(T)`. +1. Add a generic variant to `OverseerSignal`: `Message(T)`. Either way, there will be some top-level type encapsulating messages from the overseer to each subsystem. @@ -340,9 +340,15 @@ enum BitfieldSigningMessage { } ```rust enum CandidateBackingMessage { /// Requests a set of backable candidates attested by the subsystem. - /// - /// Each pair is (candidate_hash, candidate_relay_parent). - GetBackedCandidates(Vec<(CandidateHash, Hash)>, oneshot::Sender<Vec<BackedCandidate>>), + /// The order of candidates of the same para must be preserved in the response. + /// If a backed candidate of a para cannot be retrieved, the response should not contain any + /// candidates of the same para that follow it in the input vector. In other words, assuming + /// candidates are supplied in dependency order, we must ensure that this dependency order is + /// preserved. + GetBackedCandidates( + HashMap<ParaId, Vec<(CandidateHash, Hash)>>, + oneshot::Sender<HashMap<ParaId, Vec<BackedCandidate>>>, + ), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated using the provided PoV. /// The PoV is expected to match the `pov_hash` in the descriptor. 
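The new `GetBackedCandidates` contract above amounts to a per-para prefix property: for each para, the returned candidates must be a (possibly shorter) prefix of the requested, dependency-ordered ones. A small sketch of checking that property by candidate hash (the helper is illustrative, not part of the subsystem code):

```rust
use std::collections::HashMap;

use polkadot_primitives::{CandidateHash, Id as ParaId};

/// True if, for every para, the returned hashes are a prefix of the
/// requested (dependency-ordered) hashes.
fn response_preserves_dependency_order(
	requested: &HashMap<ParaId, Vec<CandidateHash>>,
	returned: &HashMap<ParaId, Vec<CandidateHash>>,
) -> bool {
	returned.iter().all(|(para, got)| {
		requested
			.get(para)
			.map_or(false, |wanted| wanted.starts_with(got))
	})
}
```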
diff --git a/polkadot/roadmap/phase-1.toml b/polkadot/roadmap/phase-1.toml index 3a5f0d752debee41c51a2124f8e91f3e03dc9d68..9b9374d234bdc735a601ad93f94899ab086c2c06 100644 --- a/polkadot/roadmap/phase-1.toml +++ b/polkadot/roadmap/phase-1.toml @@ -34,7 +34,7 @@ requires = ["two-phase-inclusion"] items = [ { label = "Submit secondary checks to runtime", port = "submitsecondary", requires = ["secondary-checking"] }, { label = "Track all candidates within the slash period as well as their session" }, - { label = "Track reports and attestatations for candidates" }, + { label = "Track reports and attestations for candidates" }, ] [[group]] diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index eae5d4fb2ef90a0acb515b863dfb0013c45bac89..4219a7e7b0dcb57c50a5e709d0695dfa723b582a 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc"], workspace = true } serde_derive = { workspace = true } static_assertions = "1.1.0" diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index def6bad692a235d3282774b0db3fabdaf33c89b3..7e582dfa59635b180d2af55c9c6a4ef1cbae5468 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. use super::{Config, MaxPermanentSlots, MaxTemporarySlots, Pallet, LOG_TARGET}; -use frame_support::traits::{Get, GetStorageVersion, OnRuntimeUpgrade}; +use frame_support::traits::{Get, GetStorageVersion, UncheckedOnRuntimeUpgrade}; #[cfg(feature = "try-runtime")] use frame_support::ensure; @@ -23,10 +23,9 @@ use frame_support::ensure; use sp_std::vec::Vec; pub mod v1 { - use super::*; pub struct VersionUncheckedMigrateToV1<T>(sp_std::marker::PhantomData<T>); - impl<T: Config> OnRuntimeUpgrade for VersionUncheckedMigrateToV1<T> { + impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1<T> { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> { let on_chain_version = Pallet::<T>::on_chain_storage_version(); diff --git a/polkadot/runtime/common/src/paras_registrar/migration.rs b/polkadot/runtime/common/src/paras_registrar/migration.rs index f977674a1e4e22254bcac6e68cc7bdf20711a043..18bb6bbfb559a23e44109230ad7fd87d0b2682ca 100644 --- a/polkadot/runtime/common/src/paras_registrar/migration.rs +++ b/polkadot/runtime/common/src/paras_registrar/migration.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. 
use super::*; -use frame_support::traits::{Contains, OnRuntimeUpgrade}; +use frame_support::traits::{Contains, UncheckedOnRuntimeUpgrade}; #[derive(Encode, Decode)] pub struct ParaInfoV1<Account, Balance> { @@ -27,7 +27,7 @@ pub struct VersionUncheckedMigrateToV1<T, UnlockParaIds>( sp_std::marker::PhantomData<(T, UnlockParaIds)>, ); -impl<T: Config, UnlockParaIds: Contains<ParaId>> OnRuntimeUpgrade +impl<T: Config, UnlockParaIds: Contains<ParaId>> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1<T, UnlockParaIds> { fn on_runtime_upgrade() -> Weight { diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 5b2098388d81cbc0028f9a1062b5ec9302a802ec..7abe23917e4c648acc1c7d47428416f447ff61b2 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -29,7 +29,7 @@ use frame_system::{self, ensure_root, ensure_signed}; use primitives::{HeadData, Id as ParaId, ValidationCode, LOWEST_PUBLIC_ID, MIN_CODE_SIZE}; use runtime_parachains::{ configuration, ensure_parachain, - paras::{self, ParaGenesisArgs, SetGoAhead}, + paras::{self, ParaGenesisArgs, UpgradeStrategy}, Origin, ParaLifecycle, }; use sp_std::{prelude::*, result}; @@ -408,6 +408,13 @@ pub mod pallet { /// Schedule a parachain upgrade. /// + /// This will kick off a check of `new_code` by all validators. After the majority of the + /// validators have reported on the validity of the code, the code will either be enacted + /// or the upgrade will be rejected. If the code is enacted, the current code of the + /// parachain will be overwritten directly. This means that any PoV will be checked by this + /// new code. The parachain itself will not be informed explicitly that the validation code + /// has changed. + /// /// Can be called by Root, the parachain, or the parachain manager if the parachain is /// unlocked. #[pallet::call_index(7)] @@ -418,7 +425,11 @@ new_code: ValidationCode, ) -> DispatchResult { Self::ensure_root_para_or_owner(origin, para)?; - runtime_parachains::schedule_code_upgrade::<T>(para, new_code, SetGoAhead::No)?; + runtime_parachains::schedule_code_upgrade::<T>( + para, + new_code, + UpgradeStrategy::ApplyAtExpectedBlock, + )?; Ok(()) } diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 46d09afcfb2c80215ed7ab18eb7be5750de1fcf6..4bbf6a14a40a8a797a1fd184e7387251af7e1ec2 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -139,7 +139,7 @@ where /// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the /// `ParaId` parachain (sibling or child). Deposits existential deposit for origin (if needed). /// Deposits estimated fee to the origin account (if needed). -/// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if neeeded). +/// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if needed). 
#[cfg(feature = "runtime-benchmarks")] pub struct ToParachainDeliveryHelper< XcmConfig, diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 6e693b83ae138d3b9a0a3cb580485f3560b2d50c..dff8549f29f3ad38748c3c2cc5b21c328356196a 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } derive_more = "0.99.17" bitflags = "1.3.2" @@ -59,7 +59,7 @@ polkadot-runtime-metrics = { path = "../metrics", default-features = false } polkadot-core-primitives = { path = "../../core-primitives", default-features = false } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" hex-literal = "0.4.1" keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } frame-support-test = { path = "../../../substrate/frame/support/test" } diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index 998e39670f97d6f6e48d079e4544e547d702509b..5d42a9d0c8eec877292b2d5d6ca318e7df8df5b4 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -622,7 +622,7 @@ fn assignment_proportions_in_core_state_work() { ); } - // Case 2: Current assignment remaning < step after pop + // Case 2: Current assignment remaining < step after pop { assert_eq!( CoretimeAssigner::pop_assignment_for_core(core_idx), diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index 5071653377d49cb80d9af73375ead355e81ade01..8589ddc292bdd6b886934341b758cc1b9ed2d1aa 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -18,7 +18,7 @@ use super::*; use frame_support::{ migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, - traits::OnRuntimeUpgrade, weights::Weight, + traits::UncheckedOnRuntimeUpgrade, weights::Weight, }; mod v0 { @@ -51,7 +51,7 @@ mod v1 { /// Migration to V1 pub struct UncheckedMigrateToV1<T>(sp_std::marker::PhantomData<T>); - impl<T: Config> OnRuntimeUpgrade for UncheckedMigrateToV1<T> { + impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1<T> { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); @@ -141,7 +141,7 @@ pub type MigrateV0ToV1 = VersionedMigration< #[cfg(test)] mod tests { - use super::{v0, v1, OnRuntimeUpgrade, Weight}; + use super::{v0, v1, UncheckedOnRuntimeUpgrade, Weight}; use crate::mock::{new_test_ext, MockGenesisConfig, OnDemandAssigner, Test}; use primitives::Id as ParaId; @@ -163,7 +163,7 @@ mod tests { // For tests, db weight is zero. 
assert_eq!( - <v1::UncheckedMigrateToV1<Test> as OnRuntimeUpgrade>::on_runtime_upgrade(), + <v1::UncheckedMigrateToV1<Test> as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(), Weight::zero() ); diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index c47c8745e654a616eda92e06ffb127ddfe40cd22..26951f34252cb61d4c9a436ad981164003064895 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -684,7 +684,7 @@ where /// Adds an order to the on demand queue. /// - /// Paramenters: + /// Parameters: /// - `location`: Whether to push this entry to the back or the front of the queue. Pushing an /// entry to the front of the queue is only used when the scheduler wants to push back an /// entry it has already popped. @@ -763,7 +763,7 @@ where /// Increases the affinity of a `ParaId` to a specified `CoreIndex`. /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_index - /// matches. A non-existant entry will be initialized with a count of 1 and uses the supplied + /// matches. A non-existent entry will be initialized with a count of 1 and uses the supplied /// `CoreIndex`. fn increase_affinity(para_id: ParaId, core_index: CoreIndex) { ParaIdAffinity::<T>::mutate(para_id, |maybe_affinity| match maybe_affinity { diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 73617010f6dece72af1ffbd4ccc3fc0d91e70f12..e29c2e218ed9be39415b7b7a5dc6504424e0bea3 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -25,13 +25,13 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use primitives::{ - collator_signature_payload, vstaging::node_features::FeatureIndex, AvailabilityBitfield, - BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, - CollatorSignature, CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, - DisputeStatementSet, GroupIndex, HeadData, Id as ParaId, IndexedVec, - InherentData as ParachainsInherentData, InvalidDisputeStatementKind, PersistedValidationData, - SessionIndex, SigningContext, UncheckedSigned, ValidDisputeStatementKind, ValidationCode, - ValidatorId, ValidatorIndex, ValidityAttestation, + collator_signature_payload, node_features::FeatureIndex, AvailabilityBitfield, BackedCandidate, + CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, + CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, + GroupIndex, HeadData, Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, + InvalidDisputeStatementKind, PersistedValidationData, SessionIndex, SigningContext, + UncheckedSigned, ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, + ValidityAttestation, }; use sp_core::{sr25519, H256}; use sp_runtime::{ diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index b7635dcd7b227993c38475a6ca13fd9a7fe53cb5..e1246fb88975877106e9ac9daffc2c689d3ecf55 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,10 +26,9 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - vstaging::{ApprovalVotingParams, NodeFeatures}, - AsyncBackingParams, 
Balance, ExecutorParamError, ExecutorParams, SessionIndex, - LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, - ON_DEMAND_MAX_QUEUE_MAX_SIZE, + ApprovalVotingParams, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, + NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, + MAX_POV_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; @@ -187,7 +186,7 @@ pub struct HostConfiguration<BlockNumber> { /// /// Must be at least 1. pub no_show_slots: u32, - /// The number of delay tranches in total. + /// The number of delay tranches in total. Must be at least 1. pub n_delay_tranches: u32, /// The width of the zeroth delay tranche for approval assignments. This many delay tranches /// beyond 0 are all consolidated to form a wide 0 tranche. @@ -247,7 +246,7 @@ impl<BlockNumber: Default + From<u32>> Default for HostConfiguration<BlockNumber> { LookaheadExceedsTTL, /// Passed in queue size for on-demand was too large. OnDemandQueueSizeTooLarge, + /// Number of delay tranches cannot be 0. + ZeroDelayTranches, } impl<BlockNumber> HostConfiguration<BlockNumber> @@ -412,6 +413,10 @@ where return Err(OnDemandQueueSizeTooLarge) } + if self.n_delay_tranches.is_zero() { + return Err(ZeroDelayTranches) + } + Ok(()) } diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs index cf228610e5c9cec1dde8cfb31880fa2b1f68821f..fa72c357d7dab20dfee26fefa270e677f6b8549f 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v10.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -17,17 +17,19 @@ //! A module that is responsible for migration of storage. use crate::configuration::{Config, Pallet}; -use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; +use frame_support::{ + pallet_prelude::*, + traits::{Defensive, UncheckedOnRuntimeUpgrade}, + weights::Weight, +}; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParams, SessionIndex, + AsyncBackingParams, Balance, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; use sp_std::vec::Vec; -use frame_support::traits::OnRuntimeUpgrade; - use super::v9::V9HostConfiguration; // All configuration of the runtime with respect to paras. 
#[derive(Clone, Encode, PartialEq, Decode, Debug)] @@ -163,7 +165,7 @@ mod v10 { } pub struct VersionUncheckedMigrateToV10<T>(sp_std::marker::PhantomData<T>); -impl<T: Config> OnRuntimeUpgrade for VersionUncheckedMigrateToV10<T> { +impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV10<T> { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV10"); diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index f6e0e0431640ba271f592759308cc0d07efcfa71..65656e8d7c065ac916dde9be670e8c3dfdbfc477 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -18,18 +18,19 @@ use crate::configuration::{self, Config, Pallet}; use frame_support::{ - migrations::VersionedMigration, pallet_prelude::*, traits::Defensive, weights::Weight, + migrations::VersionedMigration, + pallet_prelude::*, + traits::{Defensive, UncheckedOnRuntimeUpgrade}, + weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::ApprovalVotingParams, AsyncBackingParams, ExecutorParams, SessionIndex, + ApprovalVotingParams, AsyncBackingParams, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_std::vec::Vec; -use frame_support::traits::OnRuntimeUpgrade; use polkadot_core_primitives::Balance; -use primitives::vstaging::NodeFeatures; use sp_arithmetic::Perbill; use super::v10::V10HostConfiguration; @@ -177,7 +178,7 @@ pub type MigrateToV11 = VersionedMigration< >; pub struct UncheckedMigrateToV11<T>(sp_std::marker::PhantomData<T>); -impl<T: Config> OnRuntimeUpgrade for UncheckedMigrateToV11<T> { +impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV11<T> { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV11"); diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs index 4295a79893e8ee52632d54772bd108b4de37cc3f..69bacc83d044670ed65c9b168c07ae9acca508a5 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v12.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -20,7 +20,7 @@ use crate::configuration::{self, migration::v11::V11HostConfiguration, Config, P use frame_support::{ migrations::VersionedMigration, pallet_prelude::*, - traits::{Defensive, OnRuntimeUpgrade}, + traits::{Defensive, UncheckedOnRuntimeUpgrade}, }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::vstaging::SchedulerParams; @@ -70,7 +70,7 @@ pub type MigrateToV12 = VersionedMigration< pub struct UncheckedMigrateToV12<T>(sp_std::marker::PhantomData<T>); -impl<T: Config> OnRuntimeUpgrade for UncheckedMigrateToV12<T> { +impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV12<T> { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> { log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV12"); diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 03fecc58570fba65df79ca48930acb3346d3f556..193a5e46b999a8286cea232e0891e7810b60d35f 100644 --- 
a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -238,7 +238,7 @@ mod v_coretime { return None }, }; - // We assume the coretime chain set this parameter to the recommened value in RFC-1: + // We assume the coretime chain set this parameter to the recommended value in RFC-1: const TIME_SLICE_PERIOD: u32 = 80; let round_up = if valid_until % TIME_SLICE_PERIOD > 0 { 1 } else { 0 }; let time_slice = valid_until / TIME_SLICE_PERIOD + TIME_SLICE_PERIOD * round_up;
diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index eb9646d7e869d35763a5b0cdf8a408de66126b78..9095cd90ae0cfea6a72fde2d4730cad28cf97d29 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -83,6 +83,8 @@ enum CoretimeCalls { SetLease(pallet_broker::TaskId, pallet_broker::Timeslice), #[codec(index = 19)] NotifyCoreCount(u16), + #[codec(index = 99)] + SwapLeases(ParaId, ParaId), } #[frame_support::pallet] @@ -233,6 +235,24 @@ impl<T: Config> Pallet<T> { } } } + + // Handle legacy swaps in coretime. Notifies the broker parachain via an XCM message that a + // lease swap has occurred. This function is meant to be used in an implementation of the + // `OnSwap` trait. + pub fn on_legacy_lease_swap(one: ParaId, other: ParaId) { + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + mk_coretime_call(crate::coretime::CoretimeCalls::SwapLeases(one, other)), + ]); + if let Err(err) = send_xcm::<T::SendXcm>( + Location::new(0, [Junction::Parachain(T::BrokerId::get())]), + message, + ) { + log::error!("Sending `SwapLeases` to coretime chain failed: {:?}", err); + } + } } impl<T: Config> OnNewSession<BlockNumberFor<T>> for Pallet<T> {
diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index da95b060c14a3fad03e98f5e2a75ac54eef1928c..8bba97ce4bccd23b91a3d2dd31b1d71f99e7b5a2 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -25,11 +25,11 @@ use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use polkadot_runtime_metrics::get_current_time; use primitives::{ - byzantine_threshold, supermajority_threshold, vstaging::ApprovalVoteMultipleCandidates, - ApprovalVote, CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CompactStatement, ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, - ExplicitDisputeStatement, InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, - SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + byzantine_threshold, supermajority_threshold, ApprovalVote, ApprovalVoteMultipleCandidates, + CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CompactStatement, + ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, ExplicitDisputeStatement, + InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, SigningContext, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -181,7 +181,7 @@ pub trait DisputesHandler { fn is_frozen() -> bool; /// Remove dispute statement duplicates and sort the non-duplicates based on - /// local (lower indicies) vs remotes (higher indices) and age (older with lower indices).
+ /// local (lower indices) vs remotes (higher indices) and age (older with lower indices). /// /// Returns `Ok(())` if no duplicates were present, `Err(())` otherwise. /// diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index 9b2b7a48dc8b3ace0b4feea937b7efbe98e4950e..9f8fa1239187c0c95bca38a1a4a9d46dcd645c45 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -643,7 +643,7 @@ fn is_known_offence( } } -/// Actual `HandleReports` implemention. +/// Actual `HandleReports` implementation. /// /// When configured properly, should be instantiated with /// `T::KeyOwnerIdentification, Offences, ReportLongevity` parameters. diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index 42592d9d9f1496c882e0dd2d98e53cf06fcefcc1..d62533dc919b8157253be249e092c505861f1a8e 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -229,12 +229,12 @@ impl fmt::Debug for OutboundHrmpAcceptanceErr { ), TotalSizeExceeded { idx, total_size, limit } => write!( fmt, - "sending the HRMP message at index {} would exceed the neogitiated channel total size ({} > {})", + "sending the HRMP message at index {} would exceed the negotiated channel total size ({} > {})", idx, total_size, limit, ), CapacityExceeded { idx, count, limit } => write!( fmt, - "sending the HRMP message at index {} would exceed the neogitiated channel capacity ({} > {})", + "sending the HRMP message at index {} would exceed the negotiated channel capacity ({} > {})", idx, count, limit, ), } @@ -790,7 +790,7 @@ pub mod pallet { .ok_or(ArithmeticError::Underflow)?; T::Currency::unreserve( &channel_id.sender.into_account_truncating(), - // The difference should always be convertable into `Balance`, but be + // The difference should always be convertible into `Balance`, but be // paranoid and do nothing in case. amount.try_into().unwrap_or(Zero::zero()), ); diff --git a/polkadot/runtime/parachains/src/hrmp/tests.rs b/polkadot/runtime/parachains/src/hrmp/tests.rs index 7e7b67c8059dbf4ed9e2c4fc1cebb04efd44c89d..162c1412160139ce66f6b03c0fde9b6d55e95401 100644 --- a/polkadot/runtime/parachains/src/hrmp/tests.rs +++ b/polkadot/runtime/parachains/src/hrmp/tests.rs @@ -702,7 +702,7 @@ fn verify_externally_accessible() { sp_io::storage::get(&well_known_keys::hrmp_ingress_channel_index(para_b)) .expect("the ingress index must be present for para_b"); let ingress_index = >::decode(&mut &raw_ingress_index[..]) - .expect("ingress indexx should be decodable as a list of para ids"); + .expect("ingress index should be decodable as a list of para ids"); assert_eq!(ingress_index, vec![para_a]); // Now, verify that we can access and decode the egress index. 
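The `SwapLeases` call and `on_legacy_lease_swap` helper added above exist so that legacy lease swaps can be propagated to the coretime chain. In the registrar they are wired up through the `OnSwap` callback, which is implemented for tuples so that several handlers run per swap (the Rococo runtime later in this diff uses `type OnSwap = (Crowdloan, Slots, SwapLeases)`). A self-contained sketch of that dispatch pattern, with plain stand-in types rather than the real pallets:

```rust
// `ParaId` stand-in; the real type lives in polkadot-core-primitives.
type ParaId = u32;

// Simplified version of `runtime_common::traits::OnSwap`.
trait OnSwap {
    fn on_swap(one: ParaId, other: ParaId);
}

struct Crowdloan;
struct Slots;
struct SwapLeases;

impl OnSwap for Crowdloan {
    fn on_swap(_: ParaId, _: ParaId) { /* re-associate crowdloan funds */ }
}
impl OnSwap for Slots {
    fn on_swap(_: ParaId, _: ParaId) { /* swap the leases in storage */ }
}
impl OnSwap for SwapLeases {
    fn on_swap(one: ParaId, other: ParaId) {
        // In the runtime this forwards to `coretime::Pallet::on_legacy_lease_swap`,
        // which sends the `SwapLeases(one, other)` XCM call to the broker chain.
        println!("notify coretime chain: swap {one} <-> {other}");
    }
}

// The tuple implementation invokes every element in order, which is what lets
// the registrar configuration list several independent swap handlers.
impl<A: OnSwap, B: OnSwap, C: OnSwap> OnSwap for (A, B, C) {
    fn on_swap(one: ParaId, other: ParaId) {
        A::on_swap(one, other);
        B::on_swap(one, other);
        C::on_swap(one, other);
    }
}

fn main() {
    <(Crowdloan, Slots, SwapLeases)>::on_swap(2000, 3000);
}
```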
diff --git a/polkadot/runtime/parachains/src/inclusion/migration.rs b/polkadot/runtime/parachains/src/inclusion/migration.rs index 1e63b209f4e78de48ef349d90b6ef40d21afe124..5f35680ee694c45f519a1e31478c4aef0f7c19b9 100644 --- a/polkadot/runtime/parachains/src/inclusion/migration.rs +++ b/polkadot/runtime/parachains/src/inclusion/migration.rs @@ -73,7 +73,7 @@ mod v1 { CandidatePendingAvailability as V1CandidatePendingAvailability, Config, Pallet, PendingAvailability as V1PendingAvailability, }; - use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; + use frame_support::{traits::UncheckedOnRuntimeUpgrade, weights::Weight}; use sp_core::Get; use sp_std::{collections::vec_deque::VecDeque, vec::Vec}; @@ -87,7 +87,7 @@ mod v1 { pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { log::trace!(target: crate::inclusion::LOG_TARGET, "Running pre_upgrade() for inclusion MigrateToV1"); @@ -216,7 +216,7 @@ mod tests { }, mock::{new_test_ext, MockGenesisConfig, Test}, }; - use frame_support::traits::OnRuntimeUpgrade; + use frame_support::traits::UncheckedOnRuntimeUpgrade; use primitives::{AvailabilityBitfield, Id as ParaId}; use test_helpers::{dummy_candidate_commitments, dummy_candidate_descriptor, dummy_hash}; @@ -225,7 +225,7 @@ mod tests { new_test_ext(MockGenesisConfig::default()).execute_with(|| { // No data to migrate. assert_eq!( - as OnRuntimeUpgrade>::on_runtime_upgrade(), + as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(), Weight::zero() ); assert!(V1PendingAvailability::::iter().next().is_none()); @@ -299,7 +299,7 @@ mod tests { // For tests, db weight is zero. assert_eq!( - as OnRuntimeUpgrade>::on_runtime_upgrade(), + as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade(), Weight::zero() ); diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index e77f8d15b40d13deaaa96d47c04ef378a4a2a73c..34afdec724a58f97db0c9f0691e29ea56e8aca97 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -22,7 +22,7 @@ use crate::{ configuration::{self, HostConfiguration}, disputes, dmp, hrmp, - paras::{self, SetGoAhead}, + paras::{self, UpgradeStrategy}, scheduler, shared::{self, AllowedRelayParentsTracker}, util::make_persisted_validation_data_with_parent, @@ -839,7 +839,7 @@ impl Pallet { new_code, now, &config, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, )); } diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 5ab3a13324d29c3d78456f5287de1163530e6f39..97bf67ef934ebc87486587fd7b68d5a7eddc12ef 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -1590,7 +1590,7 @@ fn candidate_checks() { vec![9, 8, 7, 6, 5, 4, 3, 2, 1].into(), expected_at, &cfg, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); } @@ -2857,7 +2857,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let cause = &active_vote_state.causes()[0]; // Upgrade block is the block of inclusion, not candidate's parent. 
assert_matches!(cause, - paras::PvfCheckCause::Upgrade { id, included_at, set_go_ahead: SetGoAhead::Yes } + paras::PvfCheckCause::Upgrade { id, included_at, upgrade_strategy: UpgradeStrategy::SetGoAheadSignal } if id == &chain_a && included_at == &7 ); });
diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index b0dc27b72863fe76d5d23a9bf18408907d2b89dd..466bc7685ddfb46d3e5dfa86d81cfa3f3baeadcd 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -54,7 +54,7 @@ mod mock; mod ump_tests; pub use origin::{ensure_parachain, Origin}; -pub use paras::{ParaLifecycle, SetGoAhead}; +pub use paras::{ParaLifecycle, UpgradeStrategy}; use primitives::{HeadData, Id as ParaId, ValidationCode}; use sp_runtime::{DispatchResult, FixedU128}; @@ -104,7 +104,7 @@ pub fn schedule_parachain_downgrade<T: paras::Config>(id: ParaId) -> Result<(), pub fn schedule_code_upgrade<T: paras::Config>( id: ParaId, new_code: ValidationCode, - set_go_ahead: SetGoAhead, + set_go_ahead: UpgradeStrategy, ) -> DispatchResult { paras::Pallet::<T>::schedule_code_upgrade_external(id, new_code, set_go_ahead) }
diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 7ed62a392e4e532b244b628d7f02a0dea2209c34..461b9f4b431acea91614b295d47e3d4c273adccf 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -365,6 +365,7 @@ impl pallet_message_queue::Config for Test { type HeapSize = ConstU32<65536>; type MaxStale = ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } parameter_types! {
diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 554f0c15af24ff632e6e5ae9da34baf43aa0975e..56e6ff153c15f7f167e7744575bc3128fa0e3845 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -129,7 +129,7 @@ benchmarks! { ValidationCode(vec![0]), expired, &config, - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); }: _(RawOrigin::Root, para_id, new_head) verify {
diff --git a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs index 53ccc35c477c093f69d06f8c9d6902e41627e0af..0bf5aa86c40f6ee18c9fe4072b08b26660f87256 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking/pvf_check.rs @@ -177,7 +177,7 @@ where validation_code, /* relay_parent_number */ 1u32.into(), &configuration::Pallet::<T>::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); } else { let r = Pallet::<T>::schedule_para_initialize(
diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 5acdb9683bbf17349f35f280c6a268c49ebb5f39..3eb66112fedf4c72c0708913bc51af1ccfea6b05 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -386,16 +386,32 @@ pub(crate) enum PvfCheckCause<BlockNumber> { /// /// See https://github.com/paritytech/polkadot/issues/4601 for detailed explanation. included_at: BlockNumber, - /// Whether or not the given para should be sent the `GoAhead` signal. - set_go_ahead: SetGoAhead, + /// The strategy with which the upgrade should be enacted. + /// + /// If set to `ApplyAtExpectedBlock`, no `GoAhead` signal is sent and the parachain code + /// is overwritten directly at the expected relay chain block.
+ upgrade_strategy: UpgradeStrategy, }, } -/// Should the `GoAhead` signal be set after a successful check of the new wasm binary? +/// The strategy on how to handle a validation code upgrade. +/// +/// When scheduling a parachain code upgrade, the upgrade is first checked by all validators. The +/// validators ensure that the new validation code can be compiled and instantiated. After the +/// majority of the validators have reported their checking result, the upgrade is either scheduled +/// or aborted. This strategy then comes into play around the relay chain block at which the +/// upgrade was scheduled. #[derive(Debug, Copy, Clone, PartialEq, TypeInfo, Decode, Encode)] -pub enum SetGoAhead { - Yes, - No, +pub enum UpgradeStrategy { + /// Set the `GoAhead` signal to inform the parachain that it is time to upgrade. + /// + /// The upgrade will then be applied after the first parachain block that observed the + /// `GoAhead` signal has been enacted. + SetGoAheadSignal, + /// Apply the upgrade directly at the expected relay chain block. + /// + /// This doesn't wait for the parachain to make any kind of progress. + ApplyAtExpectedBlock, } impl<BlockNumber> PvfCheckCause<BlockNumber> { @@ -758,7 +774,8 @@ pub mod pallet { pub(super) type PastCodePruning<T: Config> = StorageValue<_, Vec<(ParaId, BlockNumberFor<T>)>, ValueQuery>; - /// The block number at which the planned code change is expected for a para. + /// The block number at which the planned code change is expected for a parachain. + /// /// The change will be applied after the first parablock for this ID included which executes /// in the context of a relay chain block with a number >= `expected_at`. #[pallet::storage] @@ -766,6 +783,18 @@ pub mod pallet { pub(super) type FutureCodeUpgrades<T: Config> = StorageMap<_, Twox64Concat, ParaId, BlockNumberFor<T>>; + /// The list of upcoming future code upgrades. + /// + /// Each item is a pair of the parachain and the expected block at which the upgrade should be + /// applied. The upgrade will be applied at the given relay chain block. In contrast to + /// [`FutureCodeUpgrades`], this code upgrade will be applied regardless of whether the + /// parachain makes any progress. + /// + /// Ordered ascending by block number. + #[pallet::storage] + pub(super) type FutureCodeUpgradesAt<T: Config> = + StorageValue<_, Vec<(ParaId, BlockNumberFor<T>)>, ValueQuery>; + /// The actual future code hash of a para. /// /// Corresponding code can be retrieved with [`CodeByHash`]. @@ -809,8 +838,10 @@ pub mod pallet { pub(super) type UpgradeCooldowns<T: Config> = StorageValue<_, Vec<(ParaId, BlockNumberFor<T>)>, ValueQuery>; - /// The list of upcoming code upgrades. Each item is a pair of which para performs a code - /// upgrade and at which relay-chain block it is expected at. + /// The list of upcoming code upgrades. + /// + /// Each item is a pair of which para performs a code upgrade and at which relay-chain block it + /// is expected at. /// /// Ordered ascending by block number.
#[pallet::storage] @@ -880,21 +911,9 @@ pub mod pallet { new_code: ValidationCode, ) -> DispatchResult { ensure_root(origin)?; - let maybe_prior_code_hash = CurrentCodeHash::<T>::get(&para); let new_code_hash = new_code.hash(); Self::increase_code_ref(&new_code_hash, &new_code); - CurrentCodeHash::<T>::insert(&para, new_code_hash); - - let now = frame_system::Pallet::<T>::block_number(); - if let Some(prior_code_hash) = maybe_prior_code_hash { - Self::note_past_code(para, now, now, prior_code_hash); - } else { - log::error!( - target: LOG_TARGET, - "Pallet paras storage is inconsistent, prior code not found {:?}", - &para - ); - } + Self::set_current_code(para, new_code_hash, frame_system::Pallet::<T>::block_number()); Self::deposit_event(Event::CurrentCodeUpdated(para)); Ok(()) } @@ -928,7 +947,7 @@ pub mod pallet { new_code, relay_parent_number, &config, - SetGoAhead::No, + UpgradeStrategy::ApplyAtExpectedBlock, ); Self::deposit_event(Event::CodeUpgradeScheduled(para)); Ok(()) } @@ -1227,7 +1246,7 @@ impl<T: Config> Pallet<T> { pub(crate) fn schedule_code_upgrade_external( id: ParaId, new_code: ValidationCode, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> DispatchResult { // Check that we can schedule an upgrade at all. ensure!(Self::can_upgrade_validation_code(id), Error::<T>::CannotUpgradeCode); @@ -1239,7 +1258,7 @@ impl<T: Config> Pallet<T> { let current_block = frame_system::Pallet::<T>::block_number(); // Schedule the upgrade with a delay just like if a parachain triggered the upgrade. let upgrade_block = current_block.saturating_add(config.validation_upgrade_delay); - Self::schedule_code_upgrade(id, new_code, upgrade_block, &config, set_go_ahead); + Self::schedule_code_upgrade(id, new_code, upgrade_block, &config, upgrade_strategy); Self::deposit_event(Event::CodeUpgradeScheduled(id)); Ok(()) } @@ -1252,8 +1271,9 @@ impl<T: Config> Pallet<T> { /// Called by the initializer to initialize the paras pallet. pub(crate) fn initializer_initialize(now: BlockNumberFor<T>) -> Weight { - let weight = Self::prune_old_code(now); - weight + Self::process_scheduled_upgrade_changes(now) + Self::prune_old_code(now) + + Self::process_scheduled_upgrade_changes(now) + + Self::process_future_code_upgrades_at(now) } /// Called by the initializer to finalize the paras pallet. @@ -1355,16 +1375,13 @@ impl<T: Config> Pallet<T> { // NOTE both of those iterates over the list and the outgoing. We do not expect either // of these to be large. Thus should be fine. UpcomingUpgrades::<T>::mutate(|upcoming_upgrades| { - *upcoming_upgrades = mem::take(upcoming_upgrades) - .into_iter() - .filter(|(para, _)| !outgoing.contains(para)) - .collect(); + upcoming_upgrades.retain(|(para, _)| !outgoing.contains(para)); }); UpgradeCooldowns::<T>::mutate(|upgrade_cooldowns| { - *upgrade_cooldowns = mem::take(upgrade_cooldowns) - .into_iter() - .filter(|(para, _)| !outgoing.contains(para)) - .collect(); + upgrade_cooldowns.retain(|(para, _)| !outgoing.contains(para)); + }); + FutureCodeUpgradesAt::<T>::mutate(|future_upgrades| { + future_upgrades.retain(|(para, _)| !outgoing.contains(para)); }); } @@ -1460,6 +1477,37 @@ impl<T: Config> Pallet<T> { T::DbWeight::get().reads_writes(1 + pruning_tasks_done, 2 * pruning_tasks_done) } + /// Process the future code upgrades that should be applied directly. + /// + /// Upgrades that should not be applied directly are processed in + /// [`Self::process_scheduled_upgrade_changes`]. + fn process_future_code_upgrades_at(now: BlockNumberFor<T>) -> Weight { + // account weight for `FutureCodeUpgradesAt::mutate`.
+ let mut weight = T::DbWeight::get().reads_writes(1, 1); + FutureCodeUpgradesAt::<T>::mutate( + |upcoming_upgrades: &mut Vec<(ParaId, BlockNumberFor<T>)>| { + let num = upcoming_upgrades.iter().take_while(|&(_, at)| at <= &now).count(); + for (id, expected_at) in upcoming_upgrades.drain(..num) { + weight += T::DbWeight::get().reads_writes(1, 1); + + // Should always be `Some` in this case, since a code upgrade is scheduled. + let new_code_hash = if let Some(new_code_hash) = FutureCodeHash::<T>::take(&id) + { + new_code_hash + } else { + log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id); + continue + }; + + weight += Self::set_current_code(id, new_code_hash, expected_at); + } + num + }, + ); + + weight + } + /// Process the timers related to upgrades. Specifically, the upgrade go ahead signals toggle /// and the upgrade cooldown restrictions. However, this function does not actually unset /// the upgrade restriction, that will happen in the `initializer_finalize` function. However, @@ -1580,14 +1628,14 @@ impl<T: Config> Pallet<T> { PvfCheckCause::Onboarding(id) => { weight += Self::proceed_with_onboarding(*id, sessions_observed); }, - PvfCheckCause::Upgrade { id, included_at, set_go_ahead } => { + PvfCheckCause::Upgrade { id, included_at, upgrade_strategy } => { weight += Self::proceed_with_upgrade( *id, code_hash, now, *included_at, cfg, - *set_go_ahead, + *upgrade_strategy, ); }, } @@ -1621,38 +1669,50 @@ impl<T: Config> Pallet<T> { now: BlockNumberFor<T>, relay_parent_number: BlockNumberFor<T>, cfg: &configuration::HostConfiguration<BlockNumberFor<T>>, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> Weight { let mut weight = Weight::zero(); - // Compute the relay-chain block number starting at which the code upgrade is ready to be - // applied. + // Compute the relay-chain block number starting at which the code upgrade is ready to + // be applied. // - // The first parablock that has a relay-parent higher or at the same height of `expected_at` - // will trigger the code upgrade. The parablock that comes after that will be validated - // against the new validation code. + // The first parablock that has a relay-parent higher or at the same height of + // `expected_at` will trigger the code upgrade. The parablock that comes after that will + // be validated against the new validation code. // - // Here we are trying to choose the block number that will have `validation_upgrade_delay` - // blocks from the relay-parent of inclusion of the the block that scheduled code upgrade - // but no less than `minimum_validation_upgrade_delay`. We want this delay out of caution - // so that when the last vote for pre-checking comes the parachain will have some time until - // the upgrade finally takes place. + // Here we are trying to choose the block number that will have + // `validation_upgrade_delay` blocks from the relay-parent of inclusion of the block + // that scheduled the code upgrade, but no less than `minimum_validation_upgrade_delay`. + // We want this delay out of caution so that when the last vote for pre-checking comes, + // the parachain will have some time until the upgrade finally takes place.
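`FutureCodeUpgradesAt` is kept ordered ascending by block number: `process_future_code_upgrades_at` above drains the due prefix, and the `match upgrade_strategy` arm just below inserts via `binary_search_by_key` to preserve the ordering. A self-contained sketch of that bookkeeping, with plain `u32` stand-ins for `ParaId` and block numbers and hypothetical helper names:

```rust
// Insert keeping the vector sorted ascending by block number, mirroring the
// `binary_search_by_key(...).unwrap_or_else(|idx| idx)` pattern in the pallet.
fn insert_upgrade(upgrades: &mut Vec<(u32, u32)>, id: u32, at: u32) {
    let idx = upgrades
        .binary_search_by_key(&at, |&(_, block)| block)
        .unwrap_or_else(|idx| idx);
    upgrades.insert(idx, (id, at));
}

// Drain every entry whose block number has been reached; `take_while` is only
// correct because the vector is ordered ascending by block number.
fn apply_due(upgrades: &mut Vec<(u32, u32)>, now: u32) -> Vec<(u32, u32)> {
    let num = upgrades.iter().take_while(|&&(_, at)| at <= now).count();
    upgrades.drain(..num).collect()
}

fn main() {
    let mut upgrades = Vec::new();
    insert_upgrade(&mut upgrades, 2000, 12);
    insert_upgrade(&mut upgrades, 3000, 7);
    assert_eq!(upgrades, vec![(3000, 7), (2000, 12)]);

    // At block 10 only the upgrade expected at block 7 is applied.
    assert_eq!(apply_due(&mut upgrades, 10), vec![(3000, 7)]);
    assert_eq!(upgrades, vec![(2000, 12)]);
}
```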
let expected_at = cmp::max( relay_parent_number + cfg.validation_upgrade_delay, now + cfg.minimum_validation_upgrade_delay, ); - weight += T::DbWeight::get().reads_writes(1, 4); - FutureCodeUpgrades::::insert(&id, expected_at); + match upgrade_strategy { + UpgradeStrategy::ApplyAtExpectedBlock => { + FutureCodeUpgradesAt::::mutate(|future_upgrades| { + let insert_idx = future_upgrades + .binary_search_by_key(&expected_at, |&(_, b)| b) + .unwrap_or_else(|idx| idx); + future_upgrades.insert(insert_idx, (id, expected_at)); + }); - // Only set an upcoming upgrade if `GoAhead` signal should be set for the respective para. - if set_go_ahead == SetGoAhead::Yes { - UpcomingUpgrades::::mutate(|upcoming_upgrades| { - let insert_idx = upcoming_upgrades - .binary_search_by_key(&expected_at, |&(_, b)| b) - .unwrap_or_else(|idx| idx); - upcoming_upgrades.insert(insert_idx, (id, expected_at)); - }); + weight += T::DbWeight::get().reads_writes(0, 2); + }, + UpgradeStrategy::SetGoAheadSignal => { + FutureCodeUpgrades::::insert(&id, expected_at); + + UpcomingUpgrades::::mutate(|upcoming_upgrades| { + let insert_idx = upcoming_upgrades + .binary_search_by_key(&expected_at, |&(_, b)| b) + .unwrap_or_else(|idx| idx); + upcoming_upgrades.insert(insert_idx, (id, expected_at)); + }); + + weight += T::DbWeight::get().reads_writes(1, 3); + }, } let expected_at = expected_at.saturated_into(); @@ -1756,7 +1816,7 @@ impl Pallet { // // - Empty value is treated as the current code is already inserted during the onboarding. // - // This is only an intermediate solution and should be fixed in foreseable future. + // This is only an intermediate solution and should be fixed in foreseeable future. // // [soaking issue]: https://github.com/paritytech/polkadot/issues/3918 let validation_code = @@ -1892,7 +1952,7 @@ impl Pallet { new_code: ValidationCode, inclusion_block_number: BlockNumberFor, cfg: &configuration::HostConfiguration>, - set_go_ahead: SetGoAhead, + upgrade_strategy: UpgradeStrategy, ) -> Weight { let mut weight = T::DbWeight::get().reads(1); @@ -1949,7 +2009,7 @@ impl Pallet { }); weight += Self::kick_off_pvf_check( - PvfCheckCause::Upgrade { id, included_at: inclusion_block_number, set_go_ahead }, + PvfCheckCause::Upgrade { id, included_at: inclusion_block_number, upgrade_strategy }, code_hash, new_code, cfg, @@ -2061,24 +2121,10 @@ impl Pallet { log::error!(target: LOG_TARGET, "Missing future code hash for {:?}", &id); return T::DbWeight::get().reads_writes(3, 1 + 3) }; - let maybe_prior_code_hash = CurrentCodeHash::::get(&id); - CurrentCodeHash::::insert(&id, &new_code_hash); - let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash); - >::deposit_log(log.into()); + let weight = Self::set_current_code(id, new_code_hash, expected_at); - // `now` is only used for registering pruning as part of `fn note_past_code` - let now = >::block_number(); - - let weight = if let Some(prior_code_hash) = maybe_prior_code_hash { - Self::note_past_code(id, expected_at, now, prior_code_hash) - } else { - log::error!(target: LOG_TARGET, "Missing prior code hash for para {:?}", &id); - Weight::zero() - }; - - // add 1 to writes due to heads update. - weight + T::DbWeight::get().reads_writes(3, 1 + 3) + weight + T::DbWeight::get().reads_writes(3, 3) } else { T::DbWeight::get().reads_writes(1, 1 + 0) } @@ -2094,6 +2140,34 @@ impl Pallet { weight.saturating_add(T::OnNewHead::on_new_head(id, &new_head)) } + /// Set the current code for the given parachain. 
+ // `at` for para-triggered replacement is the block number of the relay-chain + // block in whose context the parablock was executed + // (i.e. number of `relay_parent` in the receipt) + pub(crate) fn set_current_code( + id: ParaId, + new_code_hash: ValidationCodeHash, + at: BlockNumberFor, + ) -> Weight { + let maybe_prior_code_hash = CurrentCodeHash::::get(&id); + CurrentCodeHash::::insert(&id, &new_code_hash); + + let log = ConsensusLog::ParaUpgradeCode(id, new_code_hash); + >::deposit_log(log.into()); + + // `now` is only used for registering pruning as part of `fn note_past_code` + let now = >::block_number(); + + let weight = if let Some(prior_code_hash) = maybe_prior_code_hash { + Self::note_past_code(id, at, now, prior_code_hash) + } else { + log::error!(target: LOG_TARGET, "Missing prior code hash for para {:?}", &id); + Weight::zero() + }; + + weight + T::DbWeight::get().writes(1) + } + /// Returns the list of PVFs (aka validation code) that require casting a vote by a validator in /// the active validator set. pub(crate) fn pvfs_require_precheck() -> Vec { diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index 262ec9d3fdba95c0368d4cfe03b288ccaeb049d1..ad75166271e3aa49e6c907a62e8ee7508e9e0e02 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -451,7 +451,7 @@ fn code_upgrade_applied_after_delay() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -521,7 +521,7 @@ fn code_upgrade_applied_after_delay() { } #[test] -fn code_upgrade_applied_without_setting_go_ahead_signal() { +fn upgrade_strategy_apply_at_expected_block_works() { let code_retention_period = 10; let validation_upgrade_delay = 5; let validation_upgrade_cooldown = 10; @@ -560,77 +560,42 @@ fn code_upgrade_applied_without_setting_go_ahead_signal() { run_to_block(2, Some(vec![1])); assert_eq!(Paras::current_code(¶_id), Some(original_code.clone())); - let (expected_at, next_possible_upgrade_at) = { - // this parablock is in the context of block 1. - let expected_at = 1 + validation_upgrade_delay; - let next_possible_upgrade_at = 1 + validation_upgrade_cooldown; - // `set_go_ahead` parameter set to `false` which prevents signaling the parachain - // with the `GoAhead` signal. - Paras::schedule_code_upgrade( - para_id, - new_code.clone(), - 1, - &Configuration::config(), - SetGoAhead::No, - ); - // Include votes for super-majority. - submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); - - Paras::note_new_head(para_id, Default::default(), 1); - - assert!(Paras::past_code_meta(¶_id).most_recent_change().is_none()); - assert_eq!(FutureCodeUpgrades::::get(¶_id), Some(expected_at)); - assert_eq!(FutureCodeHash::::get(¶_id), Some(new_code.hash())); - assert_eq!(UpcomingUpgrades::::get(), vec![]); - assert_eq!(UpgradeCooldowns::::get(), vec![(para_id, next_possible_upgrade_at)]); - assert_eq!(Paras::current_code(¶_id), Some(original_code.clone())); - check_code_is_stored(&original_code); - check_code_is_stored(&new_code); - - (expected_at, next_possible_upgrade_at) - }; + // this parablock is in the context of block 1. 
+ let expected_at = 1 + validation_upgrade_delay; + let next_possible_upgrade_at = 1 + validation_upgrade_cooldown; + // Using `UpgradeStrategy::ApplyAtExpectedBlock` means no `GoAhead` signal is sent to the + // parachain; the new code is enacted directly at the expected block. + Paras::schedule_code_upgrade( + para_id, + new_code.clone(), + 1, + &Configuration::config(), + UpgradeStrategy::ApplyAtExpectedBlock, + ); + // Include votes for super-majority. + submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); + assert!(FutureCodeUpgradesAt::<Test>::get().iter().any(|(id, _)| *id == para_id)); + // Going to the expected block triggers the upgrade directly. run_to_block(expected_at, None); - // the candidate is in the context of the parent of `expected_at`, - // thus does not trigger the code upgrade. However, now the `UpgradeGoAheadSignal` - // should not be set. - { - Paras::note_new_head(para_id, Default::default(), expected_at - 1); - - assert!(Paras::past_code_meta(&para_id).most_recent_change().is_none()); - assert_eq!(FutureCodeUpgrades::<Test>::get(&para_id), Some(expected_at)); - assert_eq!(FutureCodeHash::<Test>::get(&para_id), Some(new_code.hash())); - assert!(UpgradeGoAheadSignal::<Test>::get(&para_id).is_none()); - assert_eq!(Paras::current_code(&para_id), Some(original_code.clone())); - check_code_is_stored(&original_code); - check_code_is_stored(&new_code); - } - - run_to_block(expected_at + 1, None); - - // the candidate is in the context of `expected_at`, and triggers - // the upgrade. - { - Paras::note_new_head(para_id, Default::default(), expected_at); + // Reporting a head doesn't change anything. + Paras::note_new_head(para_id, Default::default(), expected_at - 1); - assert_eq!(Paras::past_code_meta(&para_id).most_recent_change(), Some(expected_at)); - assert_eq!( - PastCodeHash::<Test>::get(&(para_id, expected_at)), - Some(original_code.hash()), - ); - assert!(FutureCodeUpgrades::<Test>::get(&para_id).is_none()); - assert!(FutureCodeHash::<Test>::get(&para_id).is_none()); - assert!(UpgradeGoAheadSignal::<Test>::get(&para_id).is_none()); - assert_eq!(Paras::current_code(&para_id), Some(new_code.clone())); - assert_eq!( - UpgradeRestrictionSignal::<Test>::get(&para_id), - Some(UpgradeRestriction::Present), - ); - assert_eq!(UpgradeCooldowns::<Test>::get(), vec![(para_id, next_possible_upgrade_at)]); - check_code_is_stored(&original_code); - check_code_is_stored(&new_code); - } + assert_eq!(Paras::past_code_meta(&para_id).most_recent_change(), Some(expected_at)); + assert_eq!(PastCodeHash::<Test>::get(&(para_id, expected_at)), Some(original_code.hash())); + assert!(FutureCodeUpgrades::<Test>::get(&para_id).is_none()); + assert!(FutureCodeUpgradesAt::<Test>::get().iter().all(|(id, _)| *id != para_id)); + assert!(FutureCodeHash::<Test>::get(&para_id).is_none()); + assert!(UpgradeGoAheadSignal::<Test>::get(&para_id).is_none()); + assert_eq!(Paras::current_code(&para_id), Some(new_code.clone())); + assert_eq!( + UpgradeRestrictionSignal::<Test>::get(&para_id), + Some(UpgradeRestriction::Present), + ); + assert_eq!(UpgradeCooldowns::<Test>::get(), vec![(para_id, next_possible_upgrade_at)]); + check_code_is_stored(&original_code); + check_code_is_stored(&new_code); run_to_block(next_possible_upgrade_at + 1, None); @@ -688,7 +653,7 @@ fn code_upgrade_applied_after_delay_even_when_late() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority.
submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -772,7 +737,7 @@ fn submit_code_change_when_not_allowed_is_err() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -790,7 +755,7 @@ fn submit_code_change_when_not_allowed_is_err() { newer_code.clone(), 2, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); assert_eq!( FutureCodeUpgrades::::get(¶_id), @@ -854,7 +819,7 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() { new_code.clone(), 0, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -879,7 +844,7 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() { newer_code.clone(), 30, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); assert_eq!(FutureCodeUpgrades::::get(¶_id), Some(0 + validation_upgrade_delay)); }); @@ -940,7 +905,7 @@ fn full_parachain_cleanup_storage() { new_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -1036,7 +1001,7 @@ fn cannot_offboard_ongoing_pvf_check() { new_code.clone(), RELAY_PARENT, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); assert!(!Paras::pvfs_require_precheck().is_empty()); @@ -1194,7 +1159,7 @@ fn code_hash_at_returns_up_to_end_of_code_retention_period() { new_code.clone(), 0, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); // Include votes for super-majority. submit_super_majority_pvf_votes(&new_code, EXPECTED_SESSION, true); @@ -1210,7 +1175,7 @@ fn code_hash_at_returns_up_to_end_of_code_retention_period() { assert_eq!(Paras::past_code_meta(¶_id).upgrade_times, vec![upgrade_at(4, 10)]); assert_eq!(Paras::current_code(¶_id), Some(new_code.clone())); - // Make sure that the old code is available **before** the code retion period passes. + // Make sure that the old code is available **before** the code retention period passes. 
run_to_block(10 + code_retention_period, None); assert_eq!(Paras::code_by_hash(&old_code.hash()), Some(old_code.clone())); assert_eq!(Paras::code_by_hash(&new_code.hash()), Some(new_code.clone())); @@ -1303,7 +1268,7 @@ fn pvf_check_coalescing_onboarding_and_upgrade() { validation_code.clone(), RELAY_PARENT, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); assert!(!Paras::pvfs_require_precheck().is_empty()); @@ -1413,7 +1378,7 @@ fn pvf_check_upgrade_reject() { new_code.clone(), RELAY_PARENT, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); check_code_is_stored(&new_code); @@ -1599,7 +1564,7 @@ fn include_pvf_check_statement_refunds_weight() { new_code.clone(), RELAY_PARENT, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); let mut stmts = IntoIterator::into_iter([0, 1, 2, 3]) @@ -1700,7 +1665,7 @@ fn poke_unused_validation_code_doesnt_remove_code_with_users() { validation_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); Paras::note_new_head(para_id, HeadData::default(), 1); @@ -1717,7 +1682,7 @@ fn poke_unused_validation_code_doesnt_remove_code_with_users() { #[test] fn increase_code_ref_doesnt_have_allergy_on_add_trusted_validation_code() { - // Verify that accidential calling of increase_code_ref or decrease_code_ref does not lead + // Verify that accidental calling of increase_code_ref or decrease_code_ref does not lead // to a disaster. // NOTE that this test is extra paranoid, as it is not really possible to hit // `decrease_code_ref` without calling `increase_code_ref` first. @@ -1771,7 +1736,7 @@ fn add_trusted_validation_code_insta_approval() { validation_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); Paras::note_new_head(para_id, HeadData::default(), 1); @@ -1813,7 +1778,7 @@ fn add_trusted_validation_code_enacts_existing_pvf_vote() { validation_code.clone(), 1, &Configuration::config(), - SetGoAhead::Yes, + UpgradeStrategy::SetGoAheadSignal, ); Paras::note_new_head(para_id, HeadData::default(), 1); diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 1b07acffb1549f60caa6a5172b46eca3c957cf4e..8f6f2166a66a5338841b7174a5ff3a1723ae4f1c 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -20,7 +20,7 @@ use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_system::RawOrigin; use sp_std::{cmp::min, collections::btree_map::BTreeMap}; -use primitives::v6::GroupIndex; +use primitives::v7::GroupIndex; use crate::builder::BenchBuilder; diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 02ddfd0accab266c2130a5c34de457c1b7b12629..31c9ab84b60cd3ea1ccf33e4152acad8454313cf 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -43,9 +43,9 @@ use frame_support::{ use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ - effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, - CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, - CoreIndex, DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, 
+ effective_minimum_backing_votes, node_features::FeatureIndex, BackedCandidate, CandidateHash, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CoreIndex, + DisputeStatementSet, HeadData, InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, @@ -733,7 +733,7 @@ fn random_sel Weight>( /// are preferred. And for disputes, local and older disputes are preferred (see /// `limit_and_sanitize_disputes`). for backed candidates, since with a increasing number of /// parachains their chances of inclusion become slim. All backed candidates are checked -/// beforehands in `fn create_inherent_inner` which guarantees sanity. +/// beforehand in `fn create_inherent_inner` which guarantees sanity. /// /// Assumes disputes are already filtered by the time this is called. /// @@ -1070,7 +1070,7 @@ fn limit_and_sanitize_disputes< log::debug!(target: LOG_TARGET, "Above max consumable weight: {}/{}", disputes_weight, max_consumable_weight); let mut checked_acc = Vec::::with_capacity(disputes.len()); - // Accumualated weight of all disputes picked, that passed the checks. + // Accumulated weight of all disputes picked, that passed the checks. let mut weight_acc = Weight::zero(); // Select disputes in-order until the remaining weight is attained diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index ba74e488cd3b7aac66fb4e62938235b491cce839..ed2e95b3cfa989f0c00f32af63f5f3d015b175ae 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -26,5 +26,5 @@ //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from //! `vstaging` tagged with the new version number (e.g. all `v3` methods). -pub mod v7; +pub mod v10; pub mod vstaging; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs similarity index 93% rename from polkadot/runtime/parachains/src/runtime_api_impl/v7.rs rename to polkadot/runtime/parachains/src/runtime_api_impl/v10.rs index 171f3f746a82f16d0929f60100fdec5b00b9dab7..21f54121ab18f0da0b3b8560477444ba82ad0080 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs @@ -14,7 +14,7 @@ //! A module exporting runtime API implementation functions for all runtime APIs using `v5` //! primitives. //! -//! Runtimes implementing the v2 runtime API are recommended to forward directly to these +//! Runtimes implementing the v10 runtime API are recommended to forward directly to these //! functions. 
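The dispute-limiting logic touched above (`limit_and_sanitize_disputes`) accumulates the weight of picked disputes and stops once the consumable budget would be exceeded. A deliberately simplified, self-contained model of that in-order, weight-capped selection (the `Dispute` type and function name below are hypothetical; the real code additionally sanitises each statement set and prefers local and older disputes):

```rust
#[derive(Debug, PartialEq)]
struct Dispute {
    id: u32,
    weight: u64,
}

// Take disputes in their already-prioritised order until the accumulated
// weight would exceed the consumable budget, then stop.
fn select_within_budget(disputes: Vec<Dispute>, max_weight: u64) -> Vec<Dispute> {
    // Accumulated weight of all disputes picked, that passed the checks.
    let mut weight_acc = 0u64;
    let mut picked = Vec::with_capacity(disputes.len());
    for dispute in disputes {
        if weight_acc + dispute.weight > max_weight {
            break;
        }
        weight_acc += dispute.weight;
        picked.push(dispute);
    }
    picked
}

fn main() {
    let disputes = vec![
        Dispute { id: 1, weight: 40 },
        Dispute { id: 2, weight: 40 },
        Dispute { id: 3, weight: 40 },
    ];
    // Only the first two fit into a budget of 100.
    assert_eq!(select_within_budget(disputes, 100).len(), 2);
}
```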
use crate::{ @@ -29,11 +29,12 @@ use primitives::{ AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, }, - slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt, - CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + slashing, ApprovalVotingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + NodeFeatures, OccupiedCore, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_runtime::traits::One; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -517,3 +518,24 @@ pub fn backing_state( pub fn async_backing_params() -> AsyncBackingParams { >::config().async_backing_params } + +/// Implementation for `DisabledValidators` +// CAVEAT: this should only be called on the node side +// as it might produce incorrect results on session boundaries +pub fn disabled_validators() -> Vec +where + T: shared::Config, +{ + >::disabled_validators() +} + +/// Returns the current state of the node features. +pub fn node_features() -> NodeFeatures { + >::config().node_features +} + +/// Approval voting subsystem configuration parameters +pub fn approval_voting_params() -> ApprovalVotingParams { + let config = >::config(); + config.approval_voting_params +} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 296b872e8d41bb212bf1ec484056a7b5762e0134..28be3f53863b4fc637c6a1746fcc67dca1eba4f2 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,36 +16,9 @@ //! Put implementations of functions from staging APIs here. -use crate::{configuration, initializer, scheduler, shared}; -use primitives::{ - vstaging::{ApprovalVotingParams, NodeFeatures}, - CoreIndex, Id as ParaId, ValidatorIndex, -}; -use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - prelude::Vec, -}; - -/// Implementation for `DisabledValidators` -// CAVEAT: this should only be called on the node side -// as it might produce incorrect results on session boundaries -pub fn disabled_validators() -> Vec -where - T: shared::Config, -{ - >::disabled_validators() -} - -/// Returns the current state of the node features. 
-pub fn node_features() -> NodeFeatures { - >::config().node_features -} - -/// Approval voting subsystem configuration parameters -pub fn approval_voting_params() -> ApprovalVotingParams { - let config = >::config(); - config.approval_voting_params -} +use crate::scheduler; +use primitives::{CoreIndex, Id as ParaId}; +use sp_std::collections::{btree_map::BTreeMap, vec_deque::VecDeque}; /// Returns the claimqueue from the scheduler pub fn claim_queue() -> BTreeMap> { diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 4c0a07d73674205dbc4fc80090f1331a8858633e..b030940fb41da8c8b85bce4fb14cf68723924058 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -19,12 +19,12 @@ use super::*; use frame_support::{ migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, - traits::OnRuntimeUpgrade, weights::Weight, + traits::UncheckedOnRuntimeUpgrade, weights::Weight, }; /// Old/legacy assignment representation (v0). /// -/// `Assignment` used to be a concrete type with the same layout V0Assignment, idential on all +/// `Assignment` used to be a concrete type with the same layout V0Assignment, identical on all /// assignment providers. This can be removed once storage has been migrated. #[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Clone)] struct V0Assignment { @@ -105,7 +105,8 @@ mod v0 { // - Assignments only consist of `ParaId`, `Assignment` is a concrete type (Same as V0Assignment). mod v1 { use frame_support::{ - pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, + pallet_prelude::ValueQuery, storage_alias, traits::UncheckedOnRuntimeUpgrade, + weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; @@ -164,7 +165,7 @@ mod v1 { /// Migration to V1 pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); @@ -302,7 +303,7 @@ mod v2 { /// Migration to V2 pub struct UncheckedMigrateToV2(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for UncheckedMigrateToV2 { + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV2 { fn on_runtime_upgrade() -> Weight { let mut weight: Weight = Weight::zero(); diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index e6a1e66b3a531906fa6cbf763f248f6f6108a0f3..720f645e3bdc148819b3eab00b0f54f706367f9d 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -836,7 +836,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { // #26 now += 1; - // Run to block #n but this time have group 1 conclude the availabilty. + // Run to block #n but this time have group 1 conclude the availability. 
for n in now..=(now + max_timeouts + 1) { // #n run_to_block(n, |_| None); diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs index 9e1b3d05842fd7b35f87de5c297958f5f3f36bf3..4ca5a73d42b99378838927d9a67a60ea83c05a07 100644 --- a/polkadot/runtime/parachains/src/session_info.rs +++ b/polkadot/runtime/parachains/src/session_info.rs @@ -158,7 +158,7 @@ impl Pallet { for idx in old_earliest_stored_session..new_earliest_stored_session { Sessions::::remove(&idx); // Idx will be missing for a few sessions after the runtime upgrade. - // But it shouldn'be be a problem. + // But it shouldn't be a problem. AccountKeys::::remove(&idx); SessionExecutorParams::::remove(&idx); } diff --git a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index 2ed1d64336b31e42d1fd0363143a7d17a66c7b97..5867a8fca66387743618ff1bebfebe19a14a0c41 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -456,7 +456,7 @@ fn verify_relay_dispatch_queue_size_is_externally_accessible() { fn assert_queue_size(para: ParaId, count: u32, size: u32) { #[allow(deprecated)] let raw_queue_size = sp_io::storage::get(&well_known_keys::relay_dispatch_queue_size(para)).expect( - "enqueing a message should create the dispatch queue\ + "enqueuing a message should create the dispatch queue\ and it should be accessible via the well known keys", ); let (c, s) = <(u32, u32)>::decode(&mut &raw_queue_size[..]) @@ -466,7 +466,7 @@ fn assert_queue_size(para: ParaId, count: u32, size: u32) { // Test the deprecated but at least type-safe `relay_dispatch_queue_size_typed`: #[allow(deprecated)] let (c, s) = well_known_keys::relay_dispatch_queue_size_typed(para).get().expect( - "enqueing a message should create the dispatch queue\ + "enqueuing a message should create the dispatch queue\ and it should be accessible via the well known keys", ); assert_eq!((c, s), (count, size)); diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 3dc59cc172817409bd607f5b68e52163a7b9afdb..19cc984e5829db4ddae3b61fe43dba8506419fca 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } serde = { workspace = true } serde_derive = { optional = true, workspace = true } @@ -60,7 +60,6 @@ pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-feat frame-executive = { path = "../../../substrate/frame/executive", default-features = false } pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-im-online = { path = "../../../substrate/frame/im-online", default-features = false } pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } @@ -104,6 +103,7 @@ polkadot-parachain-primitives = { path = "../../parachain", default-features = 
f xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } [dev-dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } @@ -152,7 +152,6 @@ std = [ "pallet-elections-phragmen/std", "pallet-grandpa/std", "pallet-identity/std", - "pallet-im-online/std", "pallet-indices/std", "pallet-membership/std", "pallet-message-queue/std", @@ -208,6 +207,7 @@ std = [ "tx-pool-api/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ @@ -226,7 +226,6 @@ runtime-benchmarks = [ "pallet-elections-phragmen/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", @@ -282,7 +281,6 @@ try-runtime = [ "pallet-elections-phragmen/try-runtime", "pallet-grandpa/try-runtime", "pallet-identity/try-runtime", - "pallet-im-online/try-runtime", "pallet-indices/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", diff --git a/polkadot/runtime/rococo/README.md b/polkadot/runtime/rococo/README.md index 5b2c296f0ced9a053de46bac5d8207d7a1d835d8..c19c3654fe4d35717283a5ad5fe1f2546a0d0f3a 100644 --- a/polkadot/runtime/rococo/README.md +++ b/polkadot/runtime/rococo/README.md @@ -4,7 +4,7 @@ Rococo is a testnet runtime with no stability guarantees. ## How to build `rococo` runtime `EpochDurationInBlocks` parameter is configurable via `ROCOCO_EPOCH_DURATION` environment variable. 
To build wasm -runtime blob with customized epoch duration the following command shall be exectuted: +runtime blob with customized epoch duration the following command shall be executed: ```bash ROCOCO_EPOCH_DURATION=10 ./polkadot/scripts/build-only-wasm.sh rococo-runtime /path/to/output/directory/ ``` diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index ac7100d7858377dca5991e0d0308dc64577b9350..cf364b6ac7942753b6fd5f47dbf621d70c7ebcf7 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -167,11 +167,16 @@ where }, ]); + let encoded_versioned_xcm = + VersionedXcm::V4(program).encode().try_into().map_err(|error| { + log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error); + pallet_xcm::Error::::XcmTooLarge + })?; // send - let _ = >::send( + let _ = >::send_blob( RawOrigin::Root.into(), Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedXcm::V4(program)), + encoded_versioned_xcm, )?; Ok(()) } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 90824a2f6f0842a7b8aa3a37f9385671170e03b6..7d16d2dbf1655d2c9563c247896afc42ca7714f7 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -23,13 +23,12 @@ use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, + slashing, AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, + NodeFeatures, Nonce, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, + SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + PARACHAIN_KEY_TYPE_ID, }; use rococo_runtime_constants::system_parachain::BROKER_ID; use runtime_common::{ @@ -38,7 +37,7 @@ use runtime_common::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, - traits::Leaser, + traits::{Leaser, OnSwap}, BlockHashCount, BlockLength, SlowAdjustingFeeUpdate, }; use scale_info::TypeInfo; @@ -53,9 +52,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, - runtime_api_impl::{ - v7 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, - }, + runtime_api_impl::v10 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -75,7 +72,7 @@ use frame_support::{ InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, StorageMapShim, WithdrawReasons, }, - weights::{ConstantMultiplier, WeightMeter}, + 
weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, }; use frame_system::{EnsureRoot, EnsureSigned}; @@ -91,14 +88,16 @@ use sp_runtime::{ IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, - RuntimeAppPublic, RuntimeDebug, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{latest::prelude::*, VersionedLocation}; +use xcm::{ + latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; use xcm_builder::PayOverXcm; pub use frame_system::Call as SystemCall; @@ -123,6 +122,7 @@ use governance::{ pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, TreasurySpender, }; +use xcm_fee_payment_runtime_api::Error as XcmPaymentApiError; #[cfg(test)] mod tests; @@ -216,7 +216,7 @@ pub struct OriginPrivilegeCmp; impl PrivilegeCmp for OriginPrivilegeCmp { fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option { if left == right { - return Some(Ordering::Equal) + return Some(Ordering::Equal); } match (left, right) { @@ -346,46 +346,6 @@ impl pallet_authorship::Config for Runtime { type EventHandler = (); } -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] -pub struct OldSessionKeys { - pub grandpa: ::Public, - pub babe: ::Public, - pub im_online: pallet_im_online::sr25519::AuthorityId, - pub para_validator: ::Public, - pub para_assignment: ::Public, - pub authority_discovery: ::Public, - pub beefy: ::Public, -} - -impl OpaqueKeys for OldSessionKeys { - type KeyTypeIdProviders = (); - fn key_ids() -> &'static [KeyTypeId] { - &[ - <::Public>::ID, - <::Public>::ID, - sp_core::crypto::key_types::IM_ONLINE, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - ] - } - fn get_raw(&self, i: KeyTypeId) -> &[u8] { - match i { - <::Public>::ID => self.grandpa.as_ref(), - <::Public>::ID => self.babe.as_ref(), - sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), - <::Public>::ID => self.para_validator.as_ref(), - <::Public>::ID => - self.para_assignment.as_ref(), - <::Public>::ID => - self.authority_discovery.as_ref(), - <::Public>::ID => self.beefy.as_ref(), - _ => &[], - } - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -397,18 +357,6 @@ impl_opaque_keys! { } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(_val: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: old.beefy, - } -} - /// Special `ValidatorIdOf` implementation that is just returning the input as result. 
pub struct ValidatorIdOf; impl sp_runtime::traits::Convert> for ValidatorIdOf { @@ -983,6 +931,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] @@ -1078,7 +1027,7 @@ impl paras_registrar::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type OnSwap = (Crowdloan, Slots); + type OnSwap = (Crowdloan, Slots, SwapLeases); type ParaDeposit = ParaDeposit; type DataDepositPerByte = DataDepositPerByte; type WeightInfo = weights::runtime_common_paras_registrar::WeightInfo; @@ -1306,6 +1255,14 @@ impl pallet_asset_rate::Config for Runtime { type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments; } +// Notify `coretime` pallet when a lease swap occurs +pub struct SwapLeases; +impl OnSwap for SwapLeases { + fn on_swap(one: ParaId, other: ParaId) { + coretime::Pallet::::on_legacy_lease_swap(one, other); + } +} + construct_runtime! { pub enum Runtime { @@ -1476,8 +1433,6 @@ pub mod migrations { use frame_support::traits::LockIdentifier; use frame_system::pallet_prelude::BlockNumberFor; - #[cfg(feature = "try-runtime")] - use sp_core::crypto::ByteArray; pub struct GetLegacyLeaseImpl; impl coretime::migration::GetLegacyLease for GetLegacyLeaseImpl { @@ -1485,11 +1440,11 @@ pub mod migrations { let now = frame_system::Pallet::::block_number(); let lease = slots::Pallet::::lease(para); if lease.is_empty() { - return None + return None; } // Lease not yet started, ignore: if lease.iter().any(Option::is_none) { - return None + return None; } let (index, _) = as Leaser>::lease_period_index(now)?; @@ -1504,7 +1459,6 @@ pub mod migrations { pub const PhragmenElectionPalletName: &'static str = "PhragmenElection"; pub const TechnicalMembershipPalletName: &'static str = "TechnicalMembership"; pub const TipsPalletName: &'static str = "Tips"; - pub const ImOnlinePalletName: &'static str = "ImOnline"; pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; } @@ -1541,79 +1495,6 @@ pub mod migrations { type PalletName = TipsPalletName; } - /// Upgrade Session keys to exclude `ImOnline` key. - /// When this is removed, should also remove `OldSessionKeys`. - pub struct UpgradeSessionKeys; - const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; - - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); - return Ok(Vec::new()) - } - - log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); - let key_ids = SessionKeys::key_ids(); - frame_support::ensure!( - key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, - "New session keys contain the ImOnline key that should have been removed", - ); - let storage_key = pallet_session::QueuedKeys::::hashed_key(); - let mut state: Vec = Vec::new(); - frame_support::storage::unhashed::get::>( - &storage_key, - ) - .ok_or::("Queued keys are not available".into())? 
- .into_iter() - .for_each(|(id, keys)| { - state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); - Ok(state) - } - - fn on_runtime_upgrade() -> Weight { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::info!("Skipping session keys upgrade: already applied"); - return ::DbWeight::get().reads(1) - } - log::trace!("Upgrading session keys"); - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade( - old_state: sp_std::vec::Vec, - ) -> Result<(), sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); - return Ok(()) - } - - let key_ids = SessionKeys::key_ids(); - let mut new_state: Vec = Vec::new(); - pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { - new_state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - new_state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); - frame_support::ensure!( - old_state == new_state, - "Pre-upgrade and post-upgrade keys do not match!" - ); - log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); - Ok(()) - } - } - // We don't have a limit in the Relay Chain. const IDENTITY_MIGRATION_KEY_LIMIT: u64 = u64::MAX; @@ -1647,12 +1528,6 @@ pub mod migrations { pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, - // Upgrade `SessionKeys` to exclude `ImOnline` - UpgradeSessionKeys, - - // Remove `im-online` pallet on-chain storage - frame_support::migrations::RemovePallet::DbWeight>, - // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, @@ -1777,6 +1652,37 @@ sp_api::impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + if !matches!(xcm_version, 3 | 4) { + return Err(XcmPaymentApiError::UnhandledXcmVersion); + } + Ok([VersionedAssetId::V4(xcm_config::TokenLocation::get().into())] + .into_iter() + .filter_map(|asset| asset.into_version(xcm_version).ok()) + .collect()) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let local_asset = VersionedAssetId::V4(xcm_config::TokenLocation::get().into()); + let asset = asset + .into_version(4) + .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; + + if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } + + Ok(WeightToFee::weight_to_fee(&weight)) + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + XcmPallet::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + XcmPallet::query_delivery_fees(destination, message) + } + } + impl sp_api::Metadata for Runtime { fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) @@ -1824,12 +1730,6 @@ sp_api::impl_runtime_apis! 
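Editor's note: in the new `XcmPaymentApi` implementation above, `query_acceptable_payment_assets` advertises only the native token and silently drops any asset that cannot be expressed in the caller's XCM version. A standalone sketch of that filter (the asset list here is an illustrative input, not the runtime's):

```rust
use xcm::{IntoVersion, VersionedAssetId};

/// Keep only the assets convertible to the requested XCM version;
/// conversion failures are filtered out rather than reported as errors.
fn acceptable_assets(
    assets: Vec<VersionedAssetId>,
    xcm_version: xcm::Version,
) -> Vec<VersionedAssetId> {
    assets
        .into_iter()
        .filter_map(|asset| asset.into_version(xcm_version).ok())
        .collect()
}
```

The preceding `matches!(xcm_version, 3 | 4)` guard keeps unknown versions from producing an empty list that a client might misread as "no fee assets accepted".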
{ impl offchain_primitives::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { - use sp_runtime::{traits::Header, DigestItem}; - - if header.digest().logs().iter().any(|di| di == &DigestItem::RuntimeEnvironmentUpdated) { - pallet_im_online::migration::clear_offchain_storage(Session::validators().len() as u32); - } - Executive::offchain_worker(header) } } @@ -1979,15 +1879,15 @@ sp_api::impl_runtime_apis! { } fn approval_voting_params() -> ApprovalVotingParams { - parachains_staging_runtime_api_impl::approval_voting_params::() + parachains_runtime_api_impl::approval_voting_params::() } fn disabled_validators() -> Vec { - parachains_staging_runtime_api_impl::disabled_validators::() + parachains_runtime_api_impl::disabled_validators::() } fn node_features() -> NodeFeatures { - parachains_staging_runtime_api_impl::node_features::() + parachains_runtime_api_impl::node_features::() } } @@ -2485,7 +2385,7 @@ mod remote_tests { #[tokio::test] async fn run_migrations() { if var("RUN_MIGRATION_TESTS").is_err() { - return + return; } sp_tracing::try_init_simple(); diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 5544ca44658cc09fa51cd75679ba90132eb16444..42972baa1c830baa820fc5c72b35f346daaf27d0 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -60,8 +60,26 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 25_043_000 picoseconds. - Weight::from_parts(25_682_000, 0) + // Minimum execution time: 24_724_000 picoseconds. + Weight::from_parts(25_615_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 24_709_000 picoseconds. 
+ Weight::from_parts(25_326_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -80,8 +98,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 107_570_000 picoseconds. - Weight::from_parts(109_878_000, 0) + // Minimum execution time: 106_600_000 picoseconds. + Weight::from_parts(110_781_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -100,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `232` // Estimated: `3697` - // Minimum execution time: 106_341_000 picoseconds. - Weight::from_parts(109_135_000, 0) + // Minimum execution time: 103_030_000 picoseconds. + Weight::from_parts(106_018_000, 0) .saturating_add(Weight::from_parts(0, 3697)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -120,8 +138,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 108_372_000 picoseconds. - Weight::from_parts(112_890_000, 0) + // Minimum execution time: 107_017_000 picoseconds. + Weight::from_parts(109_214_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -130,8 +148,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_957_000 picoseconds. - Weight::from_parts(7_417_000, 0) + // Minimum execution time: 6_864_000 picoseconds. + Weight::from_parts(7_135_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_955_000 picoseconds. + Weight::from_parts(7_165_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -140,8 +166,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_053_000 picoseconds. - Weight::from_parts(7_462_000, 0) + // Minimum execution time: 6_827_000 picoseconds. + Weight::from_parts(7_211_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -149,8 +175,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_037_000, 0) + // Minimum execution time: 1_788_000 picoseconds. + Weight::from_parts(2_021_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -171,8 +197,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 30_417_000 picoseconds. - Weight::from_parts(31_191_000, 0) + // Minimum execution time: 30_627_000 picoseconds. 
+ Weight::from_parts(31_350_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -193,8 +219,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `360` // Estimated: `3825` - // Minimum execution time: 36_666_000 picoseconds. - Weight::from_parts(37_779_000, 0) + // Minimum execution time: 36_688_000 picoseconds. + Weight::from_parts(37_345_000, 0) .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -205,8 +231,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 1_829_000 picoseconds. + Weight::from_parts(1_986_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -216,8 +242,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_188_000 picoseconds. - Weight::from_parts(16_435_000, 0) + // Minimum execution time: 16_104_000 picoseconds. + Weight::from_parts(16_464_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -228,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_431_000 picoseconds. - Weight::from_parts(16_935_000, 0) + // Minimum execution time: 16_267_000 picoseconds. + Weight::from_parts(16_675_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 18_460_000 picoseconds. - Weight::from_parts(18_885_000, 0) + // Minimum execution time: 18_487_000 picoseconds. + Weight::from_parts(19_102_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -259,8 +285,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `6156` - // Minimum execution time: 29_623_000 picoseconds. - Weight::from_parts(30_661_000, 0) + // Minimum execution time: 29_603_000 picoseconds. + Weight::from_parts(31_002_000, 0) .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -271,8 +297,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_043_000 picoseconds. - Weight::from_parts(12_360_000, 0) + // Minimum execution time: 12_183_000 picoseconds. + Weight::from_parts(12_587_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -282,8 +308,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 16_511_000 picoseconds. - Weight::from_parts(17_011_000, 0) + // Minimum execution time: 16_372_000 picoseconds. 
+ Weight::from_parts(16_967_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -302,8 +328,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `13581` - // Minimum execution time: 39_041_000 picoseconds. - Weight::from_parts(39_883_000, 0) + // Minimum execution time: 38_904_000 picoseconds. + Weight::from_parts(39_983_000, 0) .saturating_add(Weight::from_parts(0, 13581)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) @@ -316,8 +342,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_030_000 picoseconds. - Weight::from_parts(2_150_000, 0) + // Minimum execution time: 2_067_000 picoseconds. + Weight::from_parts(2_195_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -328,8 +354,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_615_000 picoseconds. - Weight::from_parts(23_008_000, 0) + // Minimum execution time: 23_982_000 picoseconds. + Weight::from_parts(24_409_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -340,8 +366,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 34_438_000 picoseconds. - Weight::from_parts(35_514_000, 0) + // Minimum execution time: 33_430_000 picoseconds. 
+ Weight::from_parts(34_433_000, 0) .saturating_add(Weight::from_parts(0, 3488)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 9753a4093045b52c8c709cfcea4a19a15dc510d6..35fb684597e7d3f8082478353a643ee2adf2481d 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { workspace = true } serde_derive = { optional = true, workspace = true } smallvec = "1.8.0" diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 8a3cd9309dbd84bb9f6c92d9ef0dabb13a36439a..446cd101efffb81c60fc44fa96130777ed027203 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_runtime_parachains::{ disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, runtime_api_impl::v7 as runtime_impl, + paras_inherent as parachains_paras_inherent, runtime_api_impl::v10 as runtime_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -829,6 +829,7 @@ sp_api::impl_runtime_apis! { } } + #[api_version(10)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { runtime_impl::validators::() @@ -956,6 +957,30 @@ sp_api::impl_runtime_apis! 
{ key_ownership_proof, ) } + + fn minimum_backing_votes() -> u32 { + runtime_impl::minimum_backing_votes::() + } + + fn para_backing_state(para_id: ParaId) -> Option { + runtime_impl::backing_state::(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + runtime_impl::async_backing_params::() + } + + fn approval_voting_params() -> primitives::ApprovalVotingParams { + runtime_impl::approval_voting_params::() + } + + fn disabled_validators() -> Vec { + runtime_impl::disabled_validators::() + } + + fn node_features() -> primitives::NodeFeatures { + runtime_impl::node_features::() + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index fcead1dd0b532b17bde7d048fad10cf5f7758d89..d726adfb8e6e439edd2b5f378f5b5160ac70e6be 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } serde = { workspace = true } @@ -64,7 +64,6 @@ pallet-election-provider-multi-phase = { path = "../../../substrate/frame/electi pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } -pallet-im-online = { path = "../../../substrate/frame/im-online", default-features = false } pallet-indices = { path = "../../../substrate/frame/indices", default-features = false } pallet-membership = { path = "../../../substrate/frame/membership", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } @@ -114,6 +113,7 @@ runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parac xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false } [dev-dependencies] hex-literal = "0.4.1" @@ -166,7 +166,6 @@ std = [ "pallet-fast-unstake/std", "pallet-grandpa/std", "pallet-identity/std", - "pallet-im-online/std", "pallet-indices/std", "pallet-membership/std", "pallet-message-queue/std", @@ -227,6 +226,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ @@ -249,7 +249,6 @@ runtime-benchmarks = [ "pallet-fast-unstake/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", @@ -308,7 +307,6 @@ try-runtime = [ "pallet-fast-unstake/try-runtime", 
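Editor's note: the test runtime above tags its `ParachainHost` implementation with `#[api_version(10)]` and fills in the methods that graduated out of the staging (`vstaging`) API. As a rough sketch of the declaration side this corresponds to (a hypothetical trait, not the real `ParachainHost`):

```rust
sp_api::decl_runtime_apis! {
    /// Hypothetical stand-in trait: the attribute declares the latest stable
    /// version of the API; a runtime opts in by placing the same
    /// `#[api_version(10)]` attribute on its impl inside `impl_runtime_apis!`.
    #[api_version(10)]
    pub trait ExampleHost {
        fn minimum_backing_votes() -> u32;
    }
}
```

Bumping the impl's version is what lets nodes detect, via the versioned runtime API, that `approval_voting_params`, `disabled_validators`, and `node_features` are now stable rather than staging calls.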
"pallet-grandpa/try-runtime", "pallet-identity/try-runtime", - "pallet-im-online/try-runtime", "pallet-indices/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index 71e6b696a20a0feb89e669067d02b12e6eeb89fd..d8741c939a507d49c181b3620d322c08692d730b 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -167,11 +167,16 @@ where }, ]); + let encoded_versioned_xcm = + VersionedXcm::V4(program).encode().try_into().map_err(|error| { + log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error); + pallet_xcm::Error::::XcmTooLarge + })?; // send - let _ = >::send( + let _ = >::send_blob( RawOrigin::Root.into(), Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedXcm::V4(program)), + encoded_versioned_xcm, )?; Ok(()) } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 664044b713eef80e646e1eecb484d9da4202938a..9445e27f0e51c016ed2744ebc930bb8dd046d875 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -35,7 +35,7 @@ use frame_support::{ InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, - weights::{ConstantMultiplier, WeightMeter}, + weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, }; use frame_system::{EnsureRoot, EnsureSigned}; @@ -45,14 +45,12 @@ use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, - vstaging::{ApprovalVotingParams, NodeFeatures}, - AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, PARACHAIN_KEY_TYPE_ID, + slashing, AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, + NodeFeatures, Nonce, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, crowdloan, @@ -62,7 +60,7 @@ use runtime_common::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, - traits::Leaser, + traits::{Leaser, OnSwap}, BalanceToU256, BlockHashCount, BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate, U256ToBalance, }; @@ -75,9 +73,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::{ - v7 as parachains_runtime_api_impl, 
vstaging as parachains_staging_runtime_api_impl, - }, + runtime_api_impl::v10 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -92,8 +88,7 @@ use sp_runtime::{ Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, - RuntimeAppPublic, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -101,11 +96,13 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use sp_version::NativeVersion; use sp_version::RuntimeVersion; use xcm::{ - latest::{InteriorLocation, Junction, Junction::PalletInstance}, - VersionedLocation, + latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, }; use xcm_builder::PayOverXcm; +use xcm_fee_payment_runtime_api::Error as XcmPaymentApiError; + pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; @@ -407,46 +404,6 @@ parameter_types! { pub const Offset: BlockNumber = 0; } -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] -pub struct OldSessionKeys { - pub grandpa: ::Public, - pub babe: ::Public, - pub im_online: pallet_im_online::sr25519::AuthorityId, - pub para_validator: ::Public, - pub para_assignment: ::Public, - pub authority_discovery: ::Public, - pub beefy: ::Public, -} - -impl OpaqueKeys for OldSessionKeys { - type KeyTypeIdProviders = (); - fn key_ids() -> &'static [KeyTypeId] { - &[ - <::Public>::ID, - <::Public>::ID, - sp_core::crypto::key_types::IM_ONLINE, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - <::Public>::ID, - ] - } - fn get_raw(&self, i: KeyTypeId) -> &[u8] { - match i { - <::Public>::ID => self.grandpa.as_ref(), - <::Public>::ID => self.babe.as_ref(), - sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), - <::Public>::ID => self.para_validator.as_ref(), - <::Public>::ID => - self.para_assignment.as_ref(), - <::Public>::ID => - self.authority_discovery.as_ref(), - <::Public>::ID => self.beefy.as_ref(), - _ => &[], - } - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -458,18 +415,6 @@ impl_opaque_keys! 
{ } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(_v: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: old.beefy, - } -} - impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; @@ -1188,6 +1133,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] @@ -1302,7 +1248,7 @@ impl paras_registrar::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type OnSwap = (Crowdloan, Slots); + type OnSwap = (Crowdloan, Slots, SwapLeases); type ParaDeposit = ParaDeposit; type DataDepositPerByte = RegistrarDataDepositPerByte; type WeightInfo = weights::runtime_common_paras_registrar::WeightInfo; @@ -1414,6 +1360,14 @@ impl pallet_asset_rate::Config for Runtime { type BenchmarkHelper = runtime_common::impls::benchmarks::AssetRateArguments; } +// Notify `coretime` pallet when a lease swap occurs +pub struct SwapLeases; +impl OnSwap for SwapLeases { + fn on_swap(one: ParaId, other: ParaId) { + coretime::Pallet::::on_legacy_lease_swap(one, other); + } +} + #[frame_support::runtime(legacy_ordering)] mod runtime { #[runtime::runtime] @@ -1650,8 +1604,6 @@ pub type Migrations = migrations::Unreleased; #[allow(deprecated, missing_docs)] pub mod migrations { use super::*; - #[cfg(feature = "try-runtime")] - use sp_core::crypto::ByteArray; pub struct GetLegacyLeaseImpl; impl coretime::migration::GetLegacyLease for GetLegacyLeaseImpl { @@ -1659,11 +1611,11 @@ pub mod migrations { let now = frame_system::Pallet::::block_number(); let lease = slots::Pallet::::lease(para); if lease.is_empty() { - return None + return None; } // Lease not yet started, ignore: if lease.iter().any(Option::is_none) { - return None + return None; } let (index, _) = as Leaser>::lease_period_index(now)?; @@ -1671,83 +1623,6 @@ pub mod migrations { } } - parameter_types! { - pub const ImOnlinePalletName: &'static str = "ImOnline"; - } - - /// Upgrade Session keys to exclude `ImOnline` key. - /// When this is removed, should also remove `OldSessionKeys`. 
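Editor's note: both runtimes extend the `paras_registrar` `OnSwap` handler tuple with `SwapLeases` so the `coretime` pallet learns about legacy lease swaps. A self-contained sketch of the fan-out behaviour of such a tuple; the trait is simplified here (the real `runtime_common::traits::OnSwap` takes `ParaId`s and, as far as this diff shows, is implemented for tuples in `runtime_common`):

```rust
/// Simplified stand-in for `runtime_common::traits::OnSwap`.
trait OnSwap {
    fn on_swap(one: u32, other: u32);
}

struct Crowdloan;
struct Slots;
struct SwapLeases;

impl OnSwap for Crowdloan {
    fn on_swap(one: u32, other: u32) { println!("crowdloan: {one} <-> {other}"); }
}
impl OnSwap for Slots {
    fn on_swap(one: u32, other: u32) { println!("slots: {one} <-> {other}"); }
}
impl OnSwap for SwapLeases {
    // In the runtime this forwards to `coretime::Pallet::on_legacy_lease_swap`.
    fn on_swap(one: u32, other: u32) { println!("coretime notified: {one} <-> {other}"); }
}

// The tuple impl notifies every member in order, which is why appending
// `SwapLeases` to `(Crowdloan, Slots)` is the entire integration.
impl<A: OnSwap, B: OnSwap, C: OnSwap> OnSwap for (A, B, C) {
    fn on_swap(one: u32, other: u32) {
        A::on_swap(one, other);
        B::on_swap(one, other);
        C::on_swap(one, other);
    }
}

fn main() {
    <(Crowdloan, Slots, SwapLeases)>::on_swap(2000, 3333);
}
```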
- pub struct UpgradeSessionKeys; - const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; - - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); - return Ok(Vec::new()) - } - - log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); - let key_ids = SessionKeys::key_ids(); - frame_support::ensure!( - key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, - "New session keys contain the ImOnline key that should have been removed", - ); - let storage_key = pallet_session::QueuedKeys::::hashed_key(); - let mut state: Vec = Vec::new(); - frame_support::storage::unhashed::get::>( - &storage_key, - ) - .ok_or::("Queued keys are not available".into())? - .into_iter() - .for_each(|(id, keys)| { - state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); - Ok(state) - } - - fn on_runtime_upgrade() -> Weight { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!("Skipping session keys upgrade: already applied"); - return ::DbWeight::get().reads(1) - } - log::info!("Upgrading session keys"); - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade( - old_state: sp_std::vec::Vec, - ) -> Result<(), sp_runtime::TryRuntimeError> { - if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { - log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); - return Ok(()) - } - - let key_ids = SessionKeys::key_ids(); - let mut new_state: Vec = Vec::new(); - pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { - new_state.extend_from_slice(id.as_slice()); - for key_id in key_ids { - new_state.extend_from_slice(keys.get_raw(*key_id)); - } - }); - frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); - frame_support::ensure!( - old_state == new_state, - "Pre-upgrade and post-upgrade keys do not match!" - ); - log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); - Ok(()) - } - } - // We don't have a limit in the Relay Chain. const IDENTITY_MIGRATION_KEY_LIMIT: u64 = u64::MAX; @@ -1764,11 +1639,6 @@ pub mod migrations { pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, pallet_nomination_pools::migration::unversioned::TotalValueLockedSync, - UpgradeSessionKeys, - frame_support::migrations::RemovePallet< - ImOnlinePalletName, - ::DbWeight, - >, // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, @@ -1920,12 +1790,6 @@ sp_api::impl_runtime_apis! 
{ impl offchain_primitives::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { - use sp_runtime::{traits::Header, DigestItem}; - - if header.digest().logs().iter().any(|di| di == &DigestItem::RuntimeEnvironmentUpdated) { - pallet_im_online::migration::clear_offchain_storage(Session::validators().len() as u32); - } - Executive::offchain_worker(header) } } @@ -2075,15 +1939,15 @@ sp_api::impl_runtime_apis! { } fn approval_voting_params() -> ApprovalVotingParams { - parachains_staging_runtime_api_impl::approval_voting_params::() + parachains_runtime_api_impl::approval_voting_params::() } fn disabled_validators() -> Vec { - parachains_staging_runtime_api_impl::disabled_validators::() + parachains_runtime_api_impl::disabled_validators::() } fn node_features() -> NodeFeatures { - parachains_staging_runtime_api_impl::node_features::() + parachains_runtime_api_impl::node_features::() } } @@ -2324,6 +2188,37 @@ sp_api::impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + if !matches!(xcm_version, 3 | 4) { + return Err(XcmPaymentApiError::UnhandledXcmVersion); + } + Ok([VersionedAssetId::V4(xcm_config::TokenLocation::get().into())] + .into_iter() + .filter_map(|asset| asset.into_version(xcm_version).ok()) + .collect()) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let local_asset = VersionedAssetId::V4(xcm_config::TokenLocation::get().into()); + let asset = asset + .into_version(4) + .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; + + if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } + + Ok(WeightToFee::weight_to_fee(&weight)) + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + XcmPallet::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + XcmPallet::query_delivery_fees(destination, message) + } + } + impl pallet_nomination_pools_runtime_api::NominationPoolsApi< Block, AccountId, @@ -2642,7 +2537,7 @@ mod remote_tests { #[tokio::test] async fn run_migrations() { if var("RUN_MIGRATION_TESTS").is_err() { - return + return; } sp_tracing::try_init_simple(); diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index 9f99631605903c805ac42089453e0fb8e5aa0485..4acb81e963b2165ee01881d189daf89c0c1a7924 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -21,7 +21,6 @@ use std::collections::HashSet; use crate::*; use frame_support::traits::WhitelistedStorageKeys; use sp_core::hexdisplay::HexDisplay; -use xcm::latest::prelude::*; #[test] fn remove_keys_weight_is_sensible() { @@ -55,11 +54,12 @@ fn sanity_check_teleport_assets_weight() { // Usually when XCM runs into an issue, it will return a weight of `Weight::MAX`, // so this test will certainly ensure that this problem does not occur. 
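Editor's note: Westend's `query_weight_to_asset_fee` above checks that the asset is the native token and then funnels through the runtime's `WeightToFee` curve; the newly imported `WeightToFee as _` trait is what makes that method call available. A minimal sketch with a hypothetical linear curve standing in for the runtime's:

```rust
use frame_support::weights::{Weight, WeightToFee};

/// Hypothetical fee curve: one unit of fee per unit of `ref_time`.
struct LinearFee;
impl WeightToFee for LinearFee {
    type Balance = u128;
    fn weight_to_fee(weight: &Weight) -> u128 {
        weight.ref_time() as u128
    }
}

fn main() {
    // What `query_weight_to_asset_fee` returns once the asset check passes.
    let fee = LinearFee::weight_to_fee(&Weight::from_parts(1_000_000, 0));
    assert_eq!(fee, 1_000_000);
}
```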
use frame_support::dispatch::GetDispatchInfo; - let weight = pallet_xcm::Call::::teleport_assets { + let weight = pallet_xcm::Call::::limited_teleport_assets { dest: Box::new(Here.into()), beneficiary: Box::new(Here.into()), assets: Box::new((Here, 200_000).into()), fee_asset_item: 0, + weight_limit: Unlimited, } .get_dispatch_info() .weight; diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs index 7a641e36a126bada81cecceadd9c22f57e5b88e9..393fa0b37176a0ca5ce8160eec7e8e3ac56c3458 100644 --- a/polkadot/runtime/westend/src/weights/pallet_staking.rs +++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-01-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-8idpd4bs-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -50,22 +50,22 @@ pub struct WeightInfo(PhantomData); impl pallet_staking::WeightInfo for WeightInfo { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `894` + // Measured: `1009` // Estimated: `4764` - // Minimum execution time: 37_340_000 picoseconds. - Weight::from_parts(38_930_000, 0) + // Minimum execution time: 40_585_000 picoseconds. + Weight::from_parts(41_800_000, 0) .saturating_add(Weight::from_parts(0, 4764)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -84,22 +84,22 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1921` // Estimated: `8877` - // Minimum execution time: 80_630_000 picoseconds. - Weight::from_parts(82_196_000, 0) + // Minimum execution time: 81_809_000 picoseconds. 
+ Weight::from_parts(84_387_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -112,43 +112,45 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2128` // Estimated: `8877` - // Minimum execution time: 83_523_000 picoseconds. - Weight::from_parts(86_639_000, 0) + // Minimum execution time: 89_419_000 picoseconds. + Weight::from_parts(91_237_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1075` + // Measured: `1223` // Estimated: `4764` - // Minimum execution time: 38_636_000 picoseconds. - Weight::from_parts(40_399_283, 0) + // Minimum execution time: 45_152_000 picoseconds. 
+ Weight::from_parts(46_460_819, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 869 - .saturating_add(Weight::from_parts(37_752, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 972 + .saturating_add(Weight::from_parts(55_473, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -174,11 +176,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 81_301_000 picoseconds. - Weight::from_parts(88_609_205, 0) + // Minimum execution time: 82_762_000 picoseconds. + Weight::from_parts(91_035_077, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 3_388 - .saturating_add(Weight::from_parts(1_253_692, 0).saturating_mul(s.into())) + // Standard Error: 3_771 + .saturating_add(Weight::from_parts(1_217_871, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -186,6 +188,8 @@ impl pallet_staking::WeightInfo for WeightInfo { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -196,8 +200,6 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -210,33 +212,37 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // 
Measured: `1301` // Estimated: `4556` - // Minimum execution time: 47_292_000 picoseconds. - Weight::from_parts(48_566_000, 0) + // Minimum execution time: 50_555_000 picoseconds. + Weight::from_parts(52_052_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1243 + k * (569 ±0)` + // Measured: `1778 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 28_840_000 picoseconds. - Weight::from_parts(27_510_817, 0) + // Minimum execution time: 35_037_000 picoseconds. + Weight::from_parts(35_081_878, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 6_603 - .saturating_add(Weight::from_parts(6_268_853, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 5_473 + .saturating_add(Weight::from_parts(6_667_924, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -247,8 +253,6 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -262,11 +266,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1797 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 57_537_000 picoseconds. - Weight::from_parts(55_854_233, 0) + // Minimum execution time: 62_098_000 picoseconds. 
+ Weight::from_parts(60_154_061, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 14_427 - .saturating_add(Weight::from_parts(3_844_957, 0).saturating_mul(n.into())) + // Standard Error: 19_257 + .saturating_add(Weight::from_parts(3_839_855, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -274,6 +278,8 @@ impl pallet_staking::WeightInfo for WeightInfo { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -288,12 +294,12 @@ impl pallet_staking::WeightInfo for WeightInfo { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1581` + // Measured: `1747` // Estimated: `6248` - // Minimum execution time: 49_997_000 picoseconds. - Weight::from_parts(51_266_000, 0) + // Minimum execution time: 54_993_000 picoseconds. + Weight::from_parts(56_698_000, 0) .saturating_add(Weight::from_parts(0, 6248)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -306,40 +312,40 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `865` // Estimated: `4556` - // Minimum execution time: 15_342_000 picoseconds. - Weight::from_parts(15_970_000, 0) + // Minimum execution time: 18_100_000 picoseconds. + Weight::from_parts(18_547_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `932` // Estimated: `4556` - // Minimum execution time: 20_719_000 picoseconds. - Weight::from_parts(21_373_000, 0) + // Minimum execution time: 23_428_000 picoseconds. 
+ Weight::from_parts(24_080_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `865` - // Estimated: `4556` - // Minimum execution time: 18_237_000 picoseconds. - Weight::from_parts(18_896_000, 0) - .saturating_add(Weight::from_parts(0, 4556)) - .saturating_add(T::DbWeight::get().reads(2)) + // Estimated: `8122` + // Minimum execution time: 21_159_000 picoseconds. + Weight::from_parts(21_706_000, 0) + .saturating_add(Weight::from_parts(0, 8122)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -348,8 +354,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_946_000 picoseconds. - Weight::from_parts(2_131_000, 0) + // Minimum execution time: 1_910_000 picoseconds. + Weight::from_parts(2_003_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -359,8 +365,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(7_208_000, 0) + // Minimum execution time: 7_076_000 picoseconds. + Weight::from_parts(7_349_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -370,8 +376,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_812_000 picoseconds. - Weight::from_parts(7_254_000, 0) + // Minimum execution time: 7_067_000 picoseconds. + Weight::from_parts(7_389_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -381,8 +387,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_787_000 picoseconds. - Weight::from_parts(7_206_000, 0) + // Minimum execution time: 7_148_000 picoseconds. + Weight::from_parts(7_446_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -393,32 +399,32 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_045_000 picoseconds. - Weight::from_parts(2_281_841, 0) + // Minimum execution time: 2_025_000 picoseconds. 
+ Weight::from_parts(2_229_953, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 70 - .saturating_add(Weight::from_parts(11_592, 0).saturating_mul(v.into())) + // Standard Error: 67 + .saturating_add(Weight::from_parts(11_785, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Staking::Ledger` (r:751 w:1502) + /// Storage: `Staking::Ledger` (r:1502 w:1502) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:751 w:751) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:751 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:751) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 751]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `668 + i * (148 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 1_657_000 picoseconds. - Weight::from_parts(1_702_000, 0) + // Measured: `680 + i * (227 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 4_321_000 picoseconds. + Weight::from_parts(4_407_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 20_041 - .saturating_add(Weight::from_parts(13_165_254, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Standard Error: 37_239 + .saturating_add(Weight::from_parts(21_300_598, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -453,11 +459,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 78_774_000 picoseconds. - Weight::from_parts(85_770_713, 0) + // Minimum execution time: 78_908_000 picoseconds. + Weight::from_parts(84_886_373, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 2_815 - .saturating_add(Weight::from_parts(1_244_494, 0).saturating_mul(s.into())) + // Standard Error: 3_376 + .saturating_add(Weight::from_parts(1_217_850, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -470,11 +476,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `66639` // Estimated: `70104` - // Minimum execution time: 129_905_000 picoseconds. - Weight::from_parts(932_195_554, 0) + // Minimum execution time: 136_389_000 picoseconds. 
+ Weight::from_parts(1_207_241_524, 0) .saturating_add(Weight::from_parts(0, 70104)) - // Standard Error: 57_492 - .saturating_add(Weight::from_parts(4_826_754, 0).saturating_mul(s.into())) + // Standard Error: 77_138 + .saturating_add(Weight::from_parts(6_443_948, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -511,11 +517,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `8249 + n * (396 ±0)` // Estimated: `10779 + n * (3774 ±0)` - // Minimum execution time: 127_094_000 picoseconds. - Weight::from_parts(160_088_053, 0) + // Minimum execution time: 130_222_000 picoseconds. + Weight::from_parts(167_236_150, 0) .saturating_add(Weight::from_parts(0, 10779)) - // Standard Error: 32_978 - .saturating_add(Weight::from_parts(39_845_710, 0).saturating_mul(n.into())) + // Standard Error: 34_051 + .saturating_add(Weight::from_parts(39_899_917, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4)) @@ -539,11 +545,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1922 + l * (5 ±0)` // Estimated: `8877` - // Minimum execution time: 75_672_000 picoseconds. - Weight::from_parts(78_708_335, 0) + // Minimum execution time: 79_136_000 picoseconds. + Weight::from_parts(82_129_497, 0) .saturating_add(Weight::from_parts(0, 8877)) - // Standard Error: 3_387 - .saturating_add(Weight::from_parts(37_084, 0).saturating_mul(l.into())) + // Standard Error: 3_867 + .saturating_add(Weight::from_parts(75_156, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -578,11 +584,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 87_991_000 picoseconds. - Weight::from_parts(90_272_005, 0) + // Minimum execution time: 89_375_000 picoseconds. + Weight::from_parts(91_224_907, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 2_815 - .saturating_add(Weight::from_parts(1_232_322, 0).saturating_mul(s.into())) + // Standard Error: 3_424 + .saturating_add(Weight::from_parts(1_219_542, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -627,14 +633,14 @@ impl pallet_staking::WeightInfo for WeightInfo { fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (716 ±0) + v * (3594 ±0)` - // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 528_862_000 picoseconds. - Weight::from_parts(534_620_000, 0) + // Estimated: `456136 + n * (3566 ±4) + v * (3566 ±0)` + // Minimum execution time: 520_905_000 picoseconds. 
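For two-component benchmarks such as `new_era(v, n)`, the regression is fitted per component, which is why `v` and `n` each carry their own standard error. The proof-size estimate composes the same way; a minimal sketch of the `456136 + n * 3566 + v * 3566` formula quoted above:

    // Sketch only: evaluates the proof-size estimate from the comment above.
    fn new_era_estimated_pov(v: u64, n: u64) -> u64 {
        456_136 + 3_566 * v + 3_566 * n
    }

    fn main() {
        // e.g. 100 validators and 1_000 nominators:
        println!("estimated PoV: {} bytes", new_era_estimated_pov(100, 1_000));
    }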
+ Weight::from_parts(523_771_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 2_005_553 - .saturating_add(Weight::from_parts(65_586_008, 0).saturating_mul(v.into())) - // Standard Error: 199_842 - .saturating_add(Weight::from_parts(18_155_389, 0).saturating_mul(n.into())) + // Standard Error: 2_142_714 + .saturating_add(Weight::from_parts(68_631_588, 0).saturating_mul(v.into())) + // Standard Error: 213_509 + .saturating_add(Weight::from_parts(19_343_025, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(184)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -665,13 +671,13 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `3108 + n * (907 ±0) + v * (391 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 33_532_110_000 picoseconds. - Weight::from_parts(33_926_321_000, 0) + // Minimum execution time: 36_848_619_000 picoseconds. + Weight::from_parts(37_362_442_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 374_134 - .saturating_add(Weight::from_parts(4_627_629, 0).saturating_mul(v.into())) - // Standard Error: 374_134 - .saturating_add(Weight::from_parts(4_068_168, 0).saturating_mul(n.into())) + // Standard Error: 415_031 + .saturating_add(Weight::from_parts(5_204_987, 0).saturating_mul(v.into())) + // Standard Error: 415_031 + .saturating_add(Weight::from_parts(4_132_636, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(179)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -688,11 +694,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `946 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_395_956_000 picoseconds. - Weight::from_parts(88_416_870, 0) + // Minimum execution time: 2_512_817_000 picoseconds. + Weight::from_parts(119_401_374, 0) .saturating_add(Weight::from_parts(0, 3510)) - // Standard Error: 8_731 - .saturating_add(Weight::from_parts(4_750_956, 0).saturating_mul(v.into())) + // Standard Error: 8_463 + .saturating_add(Weight::from_parts(4_860_364, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -715,8 +721,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_761_000 picoseconds. - Weight::from_parts(4_013_000, 0) + // Minimum execution time: 3_686_000 picoseconds. + Weight::from_parts(3_881_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -738,8 +744,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_325_000 picoseconds. - Weight::from_parts(3_519_000, 0) + // Minimum execution time: 3_143_000 picoseconds. 
+ Weight::from_parts(3_424_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -769,8 +775,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1870` // Estimated: `6248` - // Minimum execution time: 63_583_000 picoseconds. - Weight::from_parts(65_917_000, 0) + // Minimum execution time: 66_946_000 picoseconds. + Weight::from_parts(69_382_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(6)) @@ -783,8 +789,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `658` // Estimated: `3510` - // Minimum execution time: 10_975_000 picoseconds. - Weight::from_parts(11_328_000, 0) + // Minimum execution time: 11_278_000 picoseconds. + Weight::from_parts(11_603_000, 0) .saturating_add(Weight::from_parts(0, 3510)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -795,9 +801,29 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_954_000 picoseconds. - Weight::from_parts(2_081_000, 0) + // Minimum execution time: 1_963_000 picoseconds. + Weight::from_parts(2_077_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `1014` + // Estimated: `4764` + // Minimum execution time: 40_258_000 picoseconds. + Weight::from_parts(41_210_000, 0) + .saturating_add(Weight::from_parts(0, 4764)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 10725cecf24995e34b3254baa23535cb91dd9981..80bc551ba1e264f9703f5fd307ec7075642bc3bd 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -60,8 +60,26 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 25_725_000 picoseconds. - Weight::from_parts(26_174_000, 0) + // Minimum execution time: 24_535_000 picoseconds. + Weight::from_parts(25_618_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn send_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_376_000 picoseconds. + Weight::from_parts(26_180_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -80,8 +98,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 113_140_000 picoseconds. - Weight::from_parts(116_204_000, 0) + // Minimum execution time: 108_786_000 picoseconds. + Weight::from_parts(112_208_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -100,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `302` // Estimated: `6196` - // Minimum execution time: 108_571_000 picoseconds. - Weight::from_parts(110_650_000, 0) + // Minimum execution time: 105_190_000 picoseconds. + Weight::from_parts(107_140_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -120,8 +138,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 111_836_000 picoseconds. - Weight::from_parts(114_435_000, 0) + // Minimum execution time: 109_027_000 picoseconds. + Weight::from_parts(111_404_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -136,14 +154,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute_blob() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
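The eye-catching `18_446_744_073_709_551_000` is not a measurement: the `Benchmark::Override` storage comment marks this benchmark as overridden, and the emitted constant appears to be `u64::MAX` with its last three digits lost (presumably in a nanosecond-to-picosecond conversion), i.e. effectively `Weight::MAX`. A call weighed like this can never fit in a block, so `execute_blob` stays unusable here until a real benchmark replaces the placeholder. A quick check of that reading:

    // Sketch only: the override constant is u64::MAX truncated to thousands.
    fn main() {
        assert_eq!(u64::MAX, 18_446_744_073_709_551_615);
        assert_eq!((u64::MAX / 1_000) * 1_000, 18_446_744_073_709_551_000);
    }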
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_160_000 picoseconds. - Weight::from_parts(7_477_000, 0) + // Minimum execution time: 6_668_000 picoseconds. + Weight::from_parts(7_013_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -151,8 +179,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_934_000 picoseconds. - Weight::from_parts(2_053_000, 0) + // Minimum execution time: 1_740_000 picoseconds. + Weight::from_parts(1_884_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -173,8 +201,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 31_123_000 picoseconds. - Weight::from_parts(31_798_000, 0) + // Minimum execution time: 30_200_000 picoseconds. + Weight::from_parts(30_768_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -195,8 +223,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `327` // Estimated: `3792` - // Minimum execution time: 35_175_000 picoseconds. - Weight::from_parts(36_098_000, 0) + // Minimum execution time: 33_928_000 picoseconds. + Weight::from_parts(35_551_000, 0) .saturating_add(Weight::from_parts(0, 3792)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -207,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_974_000 picoseconds. - Weight::from_parts(2_096_000, 0) + // Minimum execution time: 1_759_000 picoseconds. + Weight::from_parts(1_880_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +246,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_626_000 picoseconds. - Weight::from_parts(17_170_000, 0) + // Minimum execution time: 16_507_000 picoseconds. + Weight::from_parts(17_219_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -230,8 +258,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_937_000 picoseconds. - Weight::from_parts(17_447_000, 0) + // Minimum execution time: 16_633_000 picoseconds. + Weight::from_parts(16_889_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -242,8 +270,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 19_157_000 picoseconds. - Weight::from_parts(19_659_000, 0) + // Minimum execution time: 19_297_000 picoseconds. 
+ Weight::from_parts(19_820_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -261,8 +289,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `6123` - // Minimum execution time: 30_699_000 picoseconds. - Weight::from_parts(31_537_000, 0) + // Minimum execution time: 30_364_000 picoseconds. + Weight::from_parts(31_122_000, 0) .saturating_add(Weight::from_parts(0, 6123)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -273,8 +301,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_303_000 picoseconds. - Weight::from_parts(12_670_000, 0) + // Minimum execution time: 11_997_000 picoseconds. + Weight::from_parts(12_392_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -284,8 +312,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 17_129_000 picoseconds. - Weight::from_parts(17_668_000, 0) + // Minimum execution time: 16_894_000 picoseconds. + Weight::from_parts(17_452_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -304,8 +332,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `13548` - // Minimum execution time: 39_960_000 picoseconds. - Weight::from_parts(41_068_000, 0) + // Minimum execution time: 39_864_000 picoseconds. + Weight::from_parts(40_859_000, 0) .saturating_add(Weight::from_parts(0, 13548)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) @@ -318,8 +346,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_333_000 picoseconds. - Weight::from_parts(2_504_000, 0) + // Minimum execution time: 2_363_000 picoseconds. + Weight::from_parts(2_519_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -330,8 +358,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_932_000 picoseconds. - Weight::from_parts(23_307_000, 0) + // Minimum execution time: 22_409_000 picoseconds. + Weight::from_parts(22_776_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -342,8 +370,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 34_558_000 picoseconds. - Weight::from_parts(35_299_000, 0) + // Minimum execution time: 33_551_000 picoseconds. 
+ Weight::from_parts(34_127_000, 0) .saturating_add(Weight::from_parts(0, 3488)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index 0162012825ff7e4d47c43868ac94f894b38514c7..09e883a9f7af5820832e7db52a8eeba5148d3a48 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -142,7 +142,7 @@ impl XcmWeightInfo for WestendXcmWeight { fn descend_origin(_who: &InteriorLocation) -> Weight { XcmGeneric::::descend_origin() } - fn report_error(_query_repsonse_info: &QueryResponseInfo) -> Weight { + fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { XcmGeneric::::report_error() } diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 400843c58356bfd59039b63ce66382a667dc4c7e..96d2a124ff9a343364696025ebae9393baa6dc6c 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -41,10 +41,11 @@ use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, FrameTransactionalProcessor, - FungibleAdapter, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + FungibleAdapter, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -142,13 +143,12 @@ impl Contains for OnlyParachains { } } -pub struct CollectivesOrFellows; -impl Contains for CollectivesOrFellows { +pub struct Fellows; +impl Contains for Fellows { fn contains(location: &Location) -> bool { matches!( location.unpack(), - (0, [Parachain(COLLECTIVES_ID)]) | - (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) + (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) ) } } @@ -172,8 +172,8 @@ pub type Barrier = TrailingSetTopicAsId<( AllowTopLevelPaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, - // Collectives and Fellows plurality get free execution. - AllowExplicitUnpaidExecutionFrom, + // Messages from system parachains or the Fellows plurality need not pay for execution. 
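The widened barrier works because `Contains` is implemented for tuples with any-match semantics: `AllowExplicitUnpaidExecutionFrom<(IsChildSystemParachain, Fellows)>` waives payment when either filter matches the origin. The same mechanism in miniature, with toy filters (everything here except `frame_support::traits::Contains` is hypothetical):

    // Sketch only: tuples of `Contains` impls match if ANY element matches.
    use frame_support::traits::Contains;

    struct IsEven;
    impl Contains<u32> for IsEven {
        fn contains(n: &u32) -> bool { n % 2 == 0 }
    }

    struct IsSmall;
    impl Contains<u32> for IsSmall {
        fn contains(n: &u32) -> bool { *n < 10 }
    }

    fn main() {
        assert!(<(IsEven, IsSmall) as Contains<u32>>::contains(&12)); // even
        assert!(<(IsEven, IsSmall) as Contains<u32>>::contains(&3)); // small
        assert!(!<(IsEven, IsSmall) as Contains<u32>>::contains(&13)); // neither
    }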
+ AllowExplicitUnpaidExecutionFrom<(IsChildSystemParachain, Fellows)>, ), UniversalLocation, ConstU32<8>, diff --git a/polkadot/scripts/packaging/polkadot.service b/polkadot/scripts/packaging/polkadot.service index 7fb549c97f8b9ee439e960d1ff233aff8a6bd514..8c5a483d4243e2a921a40a4b8a9b872efdbdaeb5 100644 --- a/polkadot/scripts/packaging/polkadot.service +++ b/polkadot/scripts/packaging/polkadot.service @@ -25,12 +25,13 @@ ProtectKernelTunables=true ProtectSystem=strict RemoveIPC=true RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX -RestrictNamespaces=true +RestrictNamespaces=false RestrictSUIDSGID=true SystemCallArchitectures=native SystemCallFilter=@system-service -SystemCallFilter=landlock_add_rule landlock_create_ruleset landlock_restrict_self seccomp -SystemCallFilter=~@clock @module @mount @reboot @swap @privileged +SystemCallFilter=landlock_add_rule landlock_create_ruleset landlock_restrict_self seccomp mount umount2 +SystemCallFilter=~@clock @module @reboot @swap @privileged +SystemCallFilter=pivot_root UMask=0027 [Install] diff --git a/polkadot/tests/common.rs b/polkadot/tests/common.rs index 15721c990e01b30b97db32151b0899c1e5a3888b..dbee2d3650347e228db0b3663a696f67f5a2240a 100644 --- a/polkadot/tests/common.rs +++ b/polkadot/tests/common.rs @@ -48,8 +48,8 @@ pub async fn wait_n_finalized_blocks(n: usize, url: &str) { /// Read the WS address from the output. /// -/// This is hack to get the actual binded sockaddr because -/// polkadot assigns a random port if the specified port was already binded. +/// This is a hack to get the actual bound sockaddr because +/// polkadot assigns a random port if the specified port was already bound. /// /// You must call /// `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())` diff --git a/polkadot/tests/running_the_node_and_interrupt.rs b/polkadot/tests/running_the_node_and_interrupt.rs index 079c34e0421e8a1936359b03bc46f3fea87f4b8b..85c073d3023a8e28417a2f0eb5047e2f04100820 100644 --- a/polkadot/tests/running_the_node_and_interrupt.rs +++ b/polkadot/tests/running_the_node_and_interrupt.rs @@ -32,7 +32,7 @@ async fn running_the_node_works_and_can_be_interrupted() { }; async fn run_command_and_kill(signal: Signal) { - let tmpdir = tempdir().expect("coult not create temp dir"); + let tmpdir = tempdir().expect("could not create temp dir"); let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index f9ccfb9833a6b276f001491ff29daa183f0f8e22..b214342d2f4860837ec0d2a56daa9108f1a53fe2 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -16,7 +16,7 @@ derivative = { version = "2.2.0", default-features = false, features = ["use_cor impl-trait-for-tuples = "0.2.2" log = { workspace = true } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } serde = { features = ["alloc", "derive", "rc"], workspace = true } schemars = { version = "0.8.13", default-features = true, optional = true } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 
80f2d1deedf724c28ca71508a2b21411b57096ee..8c71426a6faee49023603caa250f37bcbb42f631 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 4840b6127f55425de91eea85099649d4dfda0881..460597e6649ab27acec712e1b055fbf5968005f2 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] bounded-collections = { version = "0.2.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } @@ -26,6 +26,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } +xcm-fee-payment-runtime-api = { path = "../xcm-fee-payment-runtime-api", default-features = false } # marked optional, used in benchmarking frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -54,6 +55,7 @@ std = [ "sp-std/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] runtime-benchmarks = [ diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index ed42f93692b406ea07b2edbda0fbdb315c2b4071..e2903d592dc18aeafc7ef15d5ccc8d3fade1bcef 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -16,6 +16,7 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; +use codec::Encode; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; use frame_support::{ traits::fungible::{Inspect, Mutate}, @@ -108,6 +109,21 @@ benchmarks! { let versioned_msg = VersionedXcm::from(msg); }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) + send_blob { + let send_origin = + T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + let msg = Xcm::<()>(vec![ClearOrigin]); + let versioned_dest: VersionedLocation = T::reachable_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )? 
+ .into(); + let versioned_msg = VersionedXcm::from(msg); + let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap(); + }: _>(send_origin, Box::new(versioned_dest), encoded_versioned_msg) + teleport_assets { let (asset, destination) = T::teleportable_asset_and_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), @@ -227,6 +243,19 @@ benchmarks! { let versioned_msg = VersionedXcm::from(msg); }: _>(execute_origin, Box::new(versioned_msg), Weight::MAX) + execute_blob { + let execute_origin = + T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone()) + .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + let msg = Xcm(vec![ClearOrigin]); + if !T::XcmExecuteFilter::contains(&(origin_location, msg.clone())) { + return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) + } + let versioned_msg = VersionedXcm::from(msg); + let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap(); + }: _>(execute_origin, encoded_versioned_msg, Weight::MAX) + force_xcm_version { let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 1a1f8b402a39041ffb5852dd56e9725a550afc7c..ef255068734aae29d879e16c575a8a5cea4c5e59 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -50,8 +50,8 @@ use sp_runtime::{ use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; use xcm::{latest::QueryResponseInfo, prelude::*}; use xcm_builder::{ - ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, - SendController, SendControllerWeightInfo, + ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController, + QueryControllerWeightInfo, SendController, SendControllerWeightInfo, }; use xcm_executor::{ traits::{ @@ -61,6 +61,7 @@ use xcm_executor::{ }, AssetsInHolding, }; +use xcm_fee_payment_runtime_api::Error as FeePaymentError; #[cfg(any(feature = "try-runtime", test))] use sp_runtime::TryRuntimeError; @@ -86,6 +87,8 @@ pub trait WeightInfo { fn new_query() -> Weight; fn take_response() -> Weight; fn claim_assets() -> Weight; + fn execute_blob() -> Weight; + fn send_blob() -> Weight; } /// fallback implementation @@ -170,6 +173,14 @@ impl WeightInfo for TestWeightInfo { fn claim_assets() -> Weight { Weight::from_parts(100_000_000, 0) } + + fn execute_blob() -> Weight { + Weight::from_parts(100_000_000, 0) + } + + fn send_blob() -> Weight { + Weight::from_parts(100_000_000, 0) + } } #[frame_support::pallet] @@ -285,76 +296,49 @@ pub mod pallet { } impl ExecuteControllerWeightInfo for Pallet { - fn execute() -> Weight { - T::WeightInfo::execute() + fn execute_blob() -> Weight { + T::WeightInfo::execute_blob() } } impl ExecuteController, ::RuntimeCall> for Pallet { type WeightInfo = Self; - fn execute( + fn execute_blob( origin: OriginFor, - message: Box::RuntimeCall>>, + encoded_message: BoundedVec, max_weight: Weight, ) -> Result { - log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); - let outcome = (|| { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let mut hash = message.using_encoded(sp_io::hashing::blake2_256); - let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; - 
let value = (origin_location, message); - ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); - let (origin_location, message) = value; - Ok(T::XcmExecutor::prepare_and_execute( - origin_location, - message, - &mut hash, - max_weight, - max_weight, - )) - })() - .map_err(|e: DispatchError| { - e.with_weight(::execute()) - })?; - - Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); - let weight_used = outcome.weight_used(); - outcome.ensure_complete().map_err(|error| { - log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); - Error::::LocalExecutionIncomplete.with_weight( - weight_used.saturating_add( - ::execute(), - ), - ) - })?; - Ok(weight_used) + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let message = + VersionedXcm::<::RuntimeCall>::decode(&mut &encoded_message[..]) + .map_err(|error| { + log::error!(target: "xcm::execute_blob", "Unable to decode XCM, error: {:?}", error); + Error::::UnableToDecode + })?; + Self::execute_base(origin_location, Box::new(message), max_weight) } } impl SendControllerWeightInfo for Pallet { - fn send() -> Weight { - T::WeightInfo::send() + fn send_blob() -> Weight { + T::WeightInfo::send_blob() } } impl SendController> for Pallet { type WeightInfo = Self; - fn send( + fn send_blob( origin: OriginFor, dest: Box, - message: Box>, + encoded_message: BoundedVec, ) -> Result { let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; - let interior: Junctions = - origin_location.clone().try_into().map_err(|_| Error::::InvalidOrigin)?; - let dest = Location::try_from(*dest).map_err(|()| Error::::BadVersion)?; - let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; - - let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) - .map_err(Error::::from)?; - let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; - Self::deposit_event(e); - Ok(message_id) + let message = + VersionedXcm::<()>::decode(&mut &encoded_message[..]).map_err(|error| { + log::error!(target: "xcm::send_blob", "Unable to decode XCM, error: {:?}", error); + Error::::UnableToDecode + })?; + Self::send_base(origin_location, dest, Box::new(message)) } } @@ -374,7 +358,7 @@ pub mod pallet { origin: OriginFor, timeout: BlockNumberFor, match_querier: VersionedLocation, - ) -> Result { + ) -> Result { let responder = ::ExecuteXcmOrigin::ensure_origin(origin)?; let query_id = ::new_query( responder, @@ -561,6 +545,11 @@ pub mod pallet { TooManyReserves, /// Local XCM execution incomplete. LocalExecutionIncomplete, + /// Could not decode XCM. + UnableToDecode, + /// XCM encoded length is too large. + /// Returned when an XCM encoded length is larger than `MaxXcmEncodedSize`. + XcmTooLarge, } impl From for Error { @@ -898,16 +887,72 @@ pub mod pallet { } } - #[pallet::call] impl Pallet { + /// Underlying logic for both [`execute_blob`] and [`execute`]. 
+ fn execute_base( + origin_location: Location, + message: Box::RuntimeCall>>, + max_weight: Weight, + ) -> Result { + log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); + let outcome = (|| { + let mut hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); + let (origin_location, message) = value; + Ok(T::XcmExecutor::prepare_and_execute( + origin_location, + message, + &mut hash, + max_weight, + max_weight, + )) + })() + .map_err(|e: DispatchError| e.with_weight(T::WeightInfo::execute()))?; + + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + let weight_used = outcome.weight_used(); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); + Error::::LocalExecutionIncomplete + .with_weight(weight_used.saturating_add(T::WeightInfo::execute())) + })?; + Ok(weight_used) + } + + /// Underlying logic for both [`send_blob`] and [`send`]. + fn send_base( + origin_location: Location, + dest: Box, + message: Box>, + ) -> Result { + let interior: Junctions = + origin_location.clone().try_into().map_err(|_| Error::::InvalidOrigin)?; + let dest = Location::try_from(*dest).map_err(|()| Error::::BadVersion)?; + let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; + + let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) + .map_err(Error::::from)?; + let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; + Self::deposit_event(e); + Ok(message_id) + } + } + + #[pallet::call(weight(::WeightInfo))] + impl Pallet { + /// WARNING: DEPRECATED. `send` will be removed after June 2024. Use `send_blob` instead. + #[allow(deprecated)] + #[deprecated(note = "`send` will be removed after June 2024. Use `send_blob` instead.")] #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::send())] pub fn send( origin: OriginFor, dest: Box, message: Box>, ) -> DispatchResult { - >::send(origin, dest, message)?; + let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; + Self::send_base(origin_location, dest, message)?; Ok(()) } @@ -930,23 +975,10 @@ pub mod pallet { /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. #[pallet::call_index(1)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - let count = assets.len() as u32; - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[allow(deprecated)] + #[deprecated( + note = "This extrinsic uses `WeightLimit::Unlimited`, please migrate to `limited_teleport_assets` or `transfer_assets`" + )] pub fn teleport_assets( origin: OriginFor, dest: Box, @@ -988,23 +1020,10 @@ pub mod pallet { /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. 
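Two mechanics drive most of the churn in this file. First, the call-level `#[pallet::call(weight(...))]` attribute supplies `T::WeightInfo::<call_name>()` as the default weight, which is why the simple per-call `#[pallet::weight(...)]` attributes disappear below. Second, deprecated extrinsics pair `#[deprecated]` with `#[allow(deprecated)]` so that the pallet macro's own generated references to the call do not trip the lint inside this crate. A compressed sketch of both patterns, not a standalone pallet (bodies and most parameters elided):

    // Sketch only: call-level default weights plus the deprecation pattern.
    #[pallet::call(weight(<T as Config>::WeightInfo))]
    impl<T: Config> Pallet<T> {
        /// No explicit weight attribute: the macro falls back to
        /// `<T as Config>::WeightInfo::force_xcm_version()`, keyed by call name.
        #[pallet::call_index(4)]
        pub fn force_xcm_version(
            origin: OriginFor<T>,
            location: Box<Location>,
            version: XcmVersion,
        ) -> DispatchResult {
            // ...
            Ok(())
        }

        /// Still dispatchable, but downstream users get a deprecation warning;
        /// `#[allow(deprecated)]` keeps macro-generated self-references clean.
        #[allow(deprecated)]
        #[deprecated(note = "use `limited_teleport_assets` or `transfer_assets`")]
        #[pallet::call_index(1)]
        pub fn teleport_assets(
            origin: OriginFor<T>,
            // ...asset/destination parameters unchanged...
        ) -> DispatchResult {
            // ...
            Ok(())
        }
    }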
#[pallet::call_index(2)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - // heaviest version of locally executed XCM program: equivalent in weight to - // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[allow(deprecated)] + #[deprecated( + note = "This extrinsic uses `WeightLimit::Unlimited`, please migrate to `limited_reserve_transfer_assets` or `transfer_assets`" + )] pub fn reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1030,6 +1049,13 @@ pub mod pallet { /// No more than `max_weight` will be used in its attempted execution. If this is less than /// the maximum amount of weight that the message could take to be executed, then no /// execution attempt will be made. + /// + /// WARNING: DEPRECATED. `execute` will be removed after June 2024. Use `execute_blob` + /// instead. + #[allow(deprecated)] + #[deprecated( + note = "`execute` will be removed after June 2024. Use `execute_blob` instead." + )] #[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -1037,8 +1063,8 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { - let weight_used = - >::execute(origin, message, max_weight)?; + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let weight_used = Self::execute_base(origin_location, message, max_weight)?; Ok(Some(weight_used.saturating_add(T::WeightInfo::execute())).into()) } @@ -1049,7 +1075,6 @@ pub mod pallet { /// - `location`: The destination that is being described. /// - `xcm_version`: The latest version of XCM that `location` supports. #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::force_xcm_version())] pub fn force_xcm_version( origin: OriginFor, location: Box, @@ -1068,7 +1093,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `maybe_xcm_version`: The default XCM encoding version, or `None` to disable. #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::force_default_xcm_version())] pub fn force_default_xcm_version( origin: OriginFor, maybe_xcm_version: Option, @@ -1083,7 +1107,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `location`: The location to which we should subscribe for XCM version notifications. #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::force_subscribe_version_notify())] pub fn force_subscribe_version_notify( origin: OriginFor, location: Box, @@ -1107,7 +1130,6 @@ pub mod pallet { /// - `location`: The location to which we are currently subscribed for XCM version /// notifications which we no longer desire. #[pallet::call_index(7)] - #[pallet::weight(T::WeightInfo::force_unsubscribe_version_notify())] pub fn force_unsubscribe_version_notify( origin: OriginFor, location: Box, @@ -1140,7 +1162,7 @@ pub mod pallet { /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. 
If more weight - /// is needed than `weight_limit`, then the operation will fail and the assets send may be + /// is needed than `weight_limit`, then the operation will fail and the sent assets may be /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. @@ -1155,23 +1177,7 @@ pub mod pallet { /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(8)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - // heaviest version of locally executed XCM program: equivalent in weight to - // transfer assets to SA, reanchor them, extend XCM program, and send onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::reserve_transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[pallet::weight(T::WeightInfo::reserve_transfer_assets())] pub fn limited_reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1194,7 +1200,7 @@ pub mod pallet { /// /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item`, up to enough to pay for `weight_limit` of weight. If more weight - /// is needed than `weight_limit`, then the operation will fail and the assets send may be + /// is needed than `weight_limit`, then the operation will fail and the sent assets may be /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. @@ -1209,23 +1215,7 @@ pub mod pallet { /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(9)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - let count = assets.len() as u32; - let mut message = Xcm(vec![ - WithdrawAsset(assets), - SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] + #[pallet::weight(T::WeightInfo::teleport_assets())] pub fn limited_teleport_assets( origin: OriginFor, dest: Box, @@ -1249,7 +1239,6 @@ pub mod pallet { /// - `origin`: Must be an origin specified by AdminOrigin. /// - `suspended`: `true` to suspend, `false` to resume. #[pallet::call_index(10)] - #[pallet::weight(T::WeightInfo::force_suspension())] pub fn force_suspension(origin: OriginFor, suspended: bool) -> DispatchResult { T::AdminOrigin::ensure_origin(origin)?; XcmExecutionSuspended::::set(suspended); @@ -1262,7 +1251,7 @@ pub mod pallet { /// Fee payment on the destination side is made from the asset in the `assets` vector of /// index `fee_asset_item` (hence referred to as `fees`), up to enough to pay for /// `weight_limit` of weight. If more weight is needed than `weight_limit`, then the - /// operation will fail and the assets sent may be at risk. + /// operation will fail and the sent assets may be at risk. 
/// /// `assets` (excluding `fees`) must have same reserve location or otherwise be teleportable /// to `dest`, no limitations imposed on `fees`. @@ -1290,26 +1279,6 @@ pub mod pallet { /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(11)] - #[pallet::weight({ - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_dest: Result = (*dest.clone()).try_into(); - match (maybe_assets, maybe_dest) { - (Ok(assets), Ok(dest)) => { - use sp_std::vec; - // heaviest version of locally executed XCM program: equivalent in weight to withdrawing fees, - // burning them, transferring rest of assets to SA, reanchoring them, extending XCM program, - // and sending onward XCM - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - WithdrawAsset(assets.clone()), - BurnAsset(assets.clone()), - TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) } - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::transfer_assets().saturating_add(w)) - } - _ => Weight::MAX, - } - })] pub fn transfer_assets( origin: OriginFor, dest: Box, @@ -1399,22 +1368,6 @@ pub mod pallet { /// was the latest when they were trapped. /// - `beneficiary`: The location/account where the claimed assets will be deposited. #[pallet::call_index(12)] - #[pallet::weight({ - let assets_version = assets.identify_version(); - let maybe_assets: Result = (*assets.clone()).try_into(); - let maybe_beneficiary: Result = (*beneficiary.clone()).try_into(); - match (maybe_assets, maybe_beneficiary) { - (Ok(assets), Ok(beneficiary)) => { - let ticket: Location = GeneralIndex(assets_version as u128).into(); - let mut message = Xcm(vec![ - ClaimAsset { assets: assets.clone(), ticket }, - DepositAsset { assets: AllCounted(assets.len() as u32).into(), beneficiary }, - ]); - T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::claim_assets().saturating_add(w)) - } - _ => Weight::MAX - } - })] pub fn claim_assets( origin: OriginFor, assets: Box, @@ -1449,6 +1402,47 @@ pub mod pallet { })?; Ok(()) } + + /// Execute an XCM from a local, signed, origin. + /// + /// An event is deposited indicating whether the message could be executed completely + /// or only partially. + /// + /// No more than `max_weight` will be used in its attempted execution. If this is less than + /// the maximum amount of weight that the message could take to be executed, then no + /// execution attempt will be made. + /// + /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. + #[pallet::call_index(13)] + #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute_blob()))] + pub fn execute_blob( + origin: OriginFor, + encoded_message: BoundedVec, + max_weight: Weight, + ) -> DispatchResultWithPostInfo { + let weight_used = >::execute_blob( + origin, + encoded_message, + max_weight, + )?; + Ok(Some(weight_used.saturating_add(T::WeightInfo::execute_blob())).into()) + } + + /// Send an XCM from a local, signed, origin. + /// + /// The destination, `dest`, will receive this message with a `DescendOrigin` instruction + /// that makes the origin of the message be the origin on this system. + /// + /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. 
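For callers, the `encoded_message` argument is just the SCALE encoding of a `VersionedXcm`, bounded to `MaxXcmEncodedSize`, exactly as the benchmark code above builds it. A call-site sketch in the style of this repo's mock-runtime tests (`XcmPallet`, `RuntimeOrigin`, `ALICE`, and `assert_ok!` are assumed fixtures):

    // Sketch only: building the bounded blob and dispatching `send_blob`.
    use codec::Encode;

    #[test]
    fn send_blob_call_sketch() {
        let message = VersionedXcm::<()>::from(Xcm(vec![ClearOrigin]));
        // SCALE-encode, then bound the bytes; `try_into` fails if the
        // encoding exceeds `MaxXcmEncodedSize`.
        let encoded: BoundedVec<u8, MaxXcmEncodedSize> =
            message.encode().try_into().expect("small message fits the bound");
        assert_ok!(XcmPallet::send_blob(
            RuntimeOrigin::signed(ALICE),
            Box::new(VersionedLocation::from(Location::parent())),
            encoded,
        ));
    }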
+ #[pallet::call_index(14)] + pub fn send_blob( + origin: OriginFor, + dest: Box, + encoded_message: BoundedVec, + ) -> DispatchResult { + >::send_blob(origin, dest, encoded_message)?; + Ok(()) + } } } @@ -1478,7 +1472,6 @@ impl sp_std::fmt::Debug for FeesHandling { } impl QueryHandler for Pallet { - type QueryId = u64; type BlockNumber = BlockNumberFor; type Error = XcmError; type UniversalLocation = T::UniversalLocation; @@ -1488,7 +1481,7 @@ impl QueryHandler for Pallet { responder: impl Into, timeout: BlockNumberFor, match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { Self::do_new_query(responder, None, timeout, match_querier) } @@ -1498,7 +1491,7 @@ impl QueryHandler for Pallet { message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { let responder = responder.into(); let destination = Self::UniversalLocation::get() .invert_target(&responder) @@ -1511,7 +1504,7 @@ impl QueryHandler for Pallet { } /// Removes response when ready and emits [Event::ResponseTaken] event. - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(query_id: QueryId) -> QueryResponseStatus { match Queries::::get(query_id) { Some(QueryStatus::Ready { response, at }) => match response.try_into() { Ok(response) => { @@ -1528,7 +1521,7 @@ impl QueryHandler for Pallet { } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(id: Self::QueryId, response: Response) { + fn expect_response(id: QueryId, response: Response) { let response = response.into(); Queries::::insert( id, @@ -1991,6 +1984,7 @@ impl Pallet { ]); Ok(Xcm(vec![ WithdrawAsset(assets.into()), + SetFeesMode { jit_withdraw: true }, InitiateReserveWithdraw { assets: Wild(AllCounted(max_assets)), reserve, @@ -2363,6 +2357,37 @@ impl Pallet { AccountIdConversion::::into_account_truncating(&ID) } + pub fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + let message = + Xcm::<()>::try_from(message).map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + T::Weigher::weight(&mut message.into()).map_err(|()| { + log::error!(target: "xcm::pallet_xcm::query_xcm_weight", "Error when querying XCM weight"); + FeePaymentError::WeightNotComputable + }) + } + + pub fn query_delivery_fees( + destination: VersionedLocation, + message: VersionedXcm<()>, + ) -> Result { + let result_version = destination.identify_version().max(message.identify_version()); + + let destination = + destination.try_into().map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + let message = message.try_into().map_err(|_| FeePaymentError::VersionedConversionFailed)?; + + let (_, fees) = validate_send::(destination, message).map_err(|error| { + log::error!(target: "xcm::pallet_xcm::query_delivery_fees", "Error when querying delivery fees: {:?}", error); + FeePaymentError::Unroutable + })?; + + VersionedAssets::from(fees) + .into_version(result_version) + .map_err(|_| FeePaymentError::VersionedConversionFailed) + } + /// Create a new expectation of a query response with the querier being here. 
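The two new helpers above are the runtime-side backing for `xcm-fee-payment-runtime-api`: one weighs a `VersionedXcm`, the other routes it just far enough to learn the delivery fees, returned as `VersionedAssets` in the highest XCM version the caller used. A hypothetical runtime-internal use, with `Runtime` standing in for a concrete runtime type:

    // Sketch only: weighing a message and pricing its delivery.
    fn fee_query_sketch() -> Result<(), xcm_fee_payment_runtime_api::Error> {
        let message = VersionedXcm::<()>::from(Xcm(vec![ClearOrigin]));
        let weight = pallet_xcm::Pallet::<Runtime>::query_xcm_weight(message.clone())?;
        let fees = pallet_xcm::Pallet::<Runtime>::query_delivery_fees(
            VersionedLocation::from(Location::parent()),
            message,
        )?;
        // `weight` can then be priced with the runtime's `WeightToFee`, and
        // `fees` lists the assets the router would charge for delivery.
        let _ = (weight, fees);
        Ok(())
    }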
fn do_new_query( responder: impl Into, diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index 018436aa3c93a9291a99820a57118d119f5a0828..b157e6b5c3d5f27d206b73a1b040ca5d7944b8f2 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -19,7 +19,7 @@ use crate::{ }; use frame_support::{ pallet_prelude::*, - traits::{OnRuntimeUpgrade, StorageVersion}, + traits::{OnRuntimeUpgrade, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::Weight, }; @@ -35,7 +35,7 @@ pub mod v1 { /// /// Use experimental [`MigrateToV1`] instead. pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { let mut weight = T::DbWeight::get().reads(1); diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index b29562fc833b75a96e924d9f8cb450a58256c057..2cc228476ba8e18dace2fc950d7253cba61cd8a4 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -361,6 +361,10 @@ parameter_types! { 0, [Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)] ); + pub PaidParaForeignReserveLocation: Location = Location::new( + 0, + [Parachain(Para3000::get())] + ); pub ForeignAsset: Asset = Asset { fun: Fungible(10), id: AssetId(Location::new( @@ -368,6 +372,13 @@ parameter_types! { [Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION], )), }; + pub PaidParaForeignAsset: Asset = Asset { + fun: Fungible(10), + id: AssetId(Location::new( + 0, + [Parachain(Para3000::get())], + )), + }; pub UsdcReserveLocation: Location = Location::new( 0, [Parachain(USDC_RESERVE_PARA_ID)] @@ -450,6 +461,8 @@ parameter_types! { pub TrustedFilteredTeleport: (AssetFilter, Location) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get()); pub TeleportUsdtToForeign: (AssetFilter, Location) = (Usdt::get().into(), ForeignReserveLocation::get()); pub TrustedForeign: (AssetFilter, Location) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); + pub TrustedPaidParaForeign: (AssetFilter, Location) = (PaidParaForeignAsset::get().into(), PaidParaForeignReserveLocation::get()); + pub TrustedUsdc: (AssetFilter, Location) = (Usdc::get().into(), UsdcReserveLocation::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; @@ -483,7 +496,7 @@ impl xcm_executor::Config for XcmConfig { type XcmSender = XcmRouter; type AssetTransactor = AssetTransactors; type OriginConverter = LocalOriginConverter; - type IsReserve = (Case, Case); + type IsReserve = (Case, Case, Case); type IsTeleporter = ( Case, Case, diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 27be5cce145859a999c9e45ec2ca4aa32de3c641..7dc05c1cc70e69f7de32d26356eaacdf52d9debf 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -31,13 +31,17 @@ use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; -// Helper function to deduplicate testing different teleport types. 
-fn do_test_and_verify_teleport_assets( - origin_location: Location, - expected_beneficiary: Location, - call: Call, - expected_weight_limit: WeightLimit, -) { +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn limited_teleport_assets_works() { + let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into(); + let expected_beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + let balances = vec![ (ALICE, INITIAL_BALANCE), (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), @@ -47,7 +51,14 @@ fn do_test_and_verify_teleport_assets( let weight = BaseXcmWeight::get() * 2; assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); // call extrinsic - call(); + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(expected_beneficiary.clone().into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); assert_eq!( sent_xcm(), @@ -88,57 +99,6 @@ fn do_test_and_verify_teleport_assets( }); } -/// Test `teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn teleport_assets_works() { - let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into(); - let beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into(); - do_test_and_verify_teleport_assets( - origin_location.clone(), - beneficiary.clone(), - || { - assert_ok!(XcmPallet::teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(beneficiary.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - }, - Unlimited, - ); -} - -/// Test `limited_teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
-#[test]
-fn limited_teleport_assets_works() {
-	let origin_location: Location = AccountId32 { network: None, id: ALICE.into() }.into();
-	let beneficiary: Location = AccountId32 { network: None, id: BOB.into() }.into();
-	let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000));
-	let expected_weight_limit = weight_limit.clone();
-	do_test_and_verify_teleport_assets(
-		origin_location.clone(),
-		beneficiary.clone(),
-		|| {
-			assert_ok!(XcmPallet::limited_teleport_assets(
-				RuntimeOrigin::signed(ALICE),
-				Box::new(RelayLocation::get().into()),
-				Box::new(beneficiary.into()),
-				Box::new((Here, SEND_AMOUNT).into()),
-				0,
-				weight_limit,
-			));
-		},
-		expected_weight_limit,
-	);
-}
-
 /// `limited_teleport_assets` should fail for filtered assets
 #[test]
 fn limited_teleport_filtered_assets_disallowed() {
@@ -184,12 +144,13 @@ fn reserve_transfer_assets_with_paid_router_works() {
 	let dest: Location =
 		Junction::AccountId32 { network: None, id: user_account.clone().into() }.into();
 	assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE);
-	assert_ok!(XcmPallet::reserve_transfer_assets(
+	assert_ok!(XcmPallet::limited_reserve_transfer_assets(
 		RuntimeOrigin::signed(user_account.clone()),
 		Box::new(Parachain(paid_para_id).into()),
 		Box::new(dest.clone().into()),
 		Box::new((Here, SEND_AMOUNT).into()),
 		0,
+		Unlimited,
 	));

 	// XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount
@@ -248,7 +209,7 @@ fn reserve_transfer_assets_with_paid_router_works() {
 pub(crate) fn set_up_foreign_asset(
 	reserve_para_id: u32,
 	inner_junction: Option<Junction>,
-	benficiary: AccountId,
+	beneficiary: AccountId,
 	initial_amount: u128,
 	is_sufficient: bool,
 ) -> (Location, AccountId, Location) {
@@ -276,7 +237,7 @@ pub(crate) fn set_up_foreign_asset(
 	assert_ok!(AssetsPallet::mint(
 		RuntimeOrigin::signed(BOB),
 		foreign_asset_id_location.clone(),
-		benficiary,
+		beneficiary,
 		initial_amount
 	));

@@ -2604,3 +2565,174 @@ fn teleport_assets_using_destination_reserve_fee_disallowed() {
 		expected_result,
 	);
 }
+
+/// Test `tested_call` transferring a single asset using remote reserve.
+///
+/// Transferring Para3000 asset (`Para3000` reserve) to
+/// `OTHER_PARA_ID` (no teleport trust), therefore triggering remote reserve.
+/// Using the same asset (Para3000 reserve) for fees.
+///
+/// Asserts that the sender's balance is decreased and the beneficiary's balance
+/// is increased. Verifies the correct message is sent and event is emitted.
+///
+/// Verifies that XCM router fees (`SendXcm::validate` -> `Assets`) are withdrawn from the correct
+/// user account and deposited to the correct target account (`XcmFeesTargetAccount`).
+/// Verifies `expected_result`.
+fn remote_asset_reserve_and_remote_fee_reserve_paid_call<Call>(
+	tested_call: Call,
+	expected_result: DispatchResult,
+) where
+	Call: FnOnce(
+		OriginFor<Test>,
+		Box<VersionedLocation>,
+		Box<VersionedLocation>,
+		Box<VersionedAssets>,
+		u32,
+		WeightLimit,
+	) -> DispatchResult,
+{
+	let weight = BaseXcmWeight::get() * 3;
+	let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT);
+	let xcm_router_fee_amount = Para3000PaymentAmount::get();
+	let paid_para_id = Para3000::get();
+	let balances = vec![
+		(user_account.clone(), INITIAL_BALANCE),
+		(ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE),
+		(XcmFeesTargetAccount::get(), INITIAL_BALANCE),
+	];
+	let beneficiary: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into();
+	new_test_ext_with_balances(balances).execute_with(|| {
+		// create sufficient foreign asset BLA
+		let foreign_initial_amount = 142;
+		let (reserve_location, _, foreign_asset_id_location) = set_up_foreign_asset(
+			paid_para_id,
+			None,
+			user_account.clone(),
+			foreign_initial_amount,
+			true,
+		);
+
+		// transfer destination is another chain that is not the reserve location
+		// the goal is to trigger the remoteReserve case
+		let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap();
+
+		let transferred_asset: Assets = (foreign_asset_id_location.clone(), SEND_AMOUNT).into();
+
+		// balances checks before
+		assert_eq!(
+			AssetsPallet::balance(foreign_asset_id_location.clone(), user_account.clone()),
+			foreign_initial_amount
+		);
+		assert_eq!(Balances::free_balance(user_account.clone()), INITIAL_BALANCE);
+
+		// do the transfer
+		let result = tested_call(
+			RuntimeOrigin::signed(user_account.clone()),
+			Box::new(dest.clone().into()),
+			Box::new(beneficiary.clone().into()),
+			Box::new(transferred_asset.into()),
+			0 as u32,
+			Unlimited,
+		);
+		assert_eq!(result, expected_result);
+		if expected_result.is_err() {
+			// short-circuit here for tests where we expect failure
+			return;
+		}
+
+		let mut last_events = last_events(7).into_iter();
+		// asset events
+		// forceCreate
+		last_events.next().unwrap();
+		// mint tokens
+		last_events.next().unwrap();
+		// burn tokens
+		last_events.next().unwrap();
+		// balance events
+		// burn delivery fee
+		last_events.next().unwrap();
+		// mint delivery fee
+		last_events.next().unwrap();
+		assert_eq!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::Attempted {
+				outcome: Outcome::Complete { used: weight }
+			})
+		);
+
+		// user account spent (transferred) amount
+		assert_eq!(
+			AssetsPallet::balance(foreign_asset_id_location.clone(), user_account.clone()),
+			foreign_initial_amount - SEND_AMOUNT
+		);
+
+		// user account spent delivery fees
+		assert_eq!(Balances::free_balance(user_account), INITIAL_BALANCE - xcm_router_fee_amount);
+
+		// XcmFeesTargetAccount is where the xcm_router_fee_amount should land
+		assert_eq!(
+			Balances::free_balance(XcmFeesTargetAccount::get()),
+			INITIAL_BALANCE + xcm_router_fee_amount
+		);
+
+		// Verify total and active issuance of foreign BLA have decreased (burned on
+		// reserve-withdraw)
+		let expected_issuance = foreign_initial_amount - SEND_AMOUNT;
+		assert_eq!(
+			AssetsPallet::total_issuance(foreign_asset_id_location.clone()),
+			expected_issuance
+		);
+		assert_eq!(
+			AssetsPallet::active_issuance(foreign_asset_id_location.clone()),
+			expected_issuance
+		);
+
+		let context = UniversalLocation::get();
+		let foreign_id_location_reanchored =
+			foreign_asset_id_location.reanchored(&dest, &context).unwrap();
+		let dest_reanchored = dest.reanchored(&reserve_location,
&context).unwrap(); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + reserve_location, + // `assets` are burned on source and withdrawn from SA in remote reserve chain + Xcm(vec![ + WithdrawAsset((Location::here(), SEND_AMOUNT).into()), + ClearOrigin, + buy_execution((Location::here(), SEND_AMOUNT / 2)), + DepositReserveAsset { + assets: Wild(AllCounted(1)), + // final destination is `dest` as seen by `reserve` + dest: dest_reanchored, + // message sent onward to `dest` + xcm: Xcm(vec![ + buy_execution((foreign_id_location_reanchored, SEND_AMOUNT / 2)), + DepositAsset { assets: AllCounted(1).into(), beneficiary } + ]) + } + ]) + )] + ); + }); +} +/// Test `transfer_assets` with remote asset reserve and remote fee reserve. +#[test] +fn transfer_assets_with_remote_asset_reserve_and_remote_asset_fee_reserve_paid_works() { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_paid_call( + XcmPallet::transfer_assets, + expected_result, + ); +} +/// Test `limited_reserve_transfer_assets` with remote asset reserve and remote fee reserve. +#[test] +fn limited_reserve_transfer_assets_with_remote_asset_reserve_and_remote_asset_fee_reserve_paid_works( +) { + let expected_result = Ok(()); + remote_asset_reserve_and_remote_fee_reserve_paid_call( + XcmPallet::limited_reserve_transfer_assets, + expected_result, + ); +} diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 7dd6de9d772a6a2db9d491bc872e9e62467cb2dd..497dae1694a4d02a4f8ed167a733c9aace3b06d7 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -20,10 +20,10 @@ pub(crate) mod assets_transfer; use crate::{ mock::*, pallet::SupportedVersion, AssetTraps, Config, CurrentMigration, Error, - ExecuteControllerWeightInfo, LatestVersionedLocation, Pallet, Queries, QueryStatus, - VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, - WeightInfo, + LatestVersionedLocation, Pallet, Queries, QueryStatus, VersionDiscoveryQueue, + VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, WeightInfo, }; +use codec::Encode; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, traits::{Currency, Hooks}, @@ -305,11 +305,12 @@ fn send_works() { ]); let versioned_dest = Box::new(RelayLocation::get().into()); - let versioned_message = Box::new(VersionedXcm::from(message.clone())); - assert_ok!(XcmPallet::send( + let versioned_message = VersionedXcm::from(message.clone()); + let encoded_versioned_message = versioned_message.encode().try_into().unwrap(); + assert_ok!(XcmPallet::send_blob( RuntimeOrigin::signed(ALICE), versioned_dest, - versioned_message + encoded_versioned_message )); let sent_message = Xcm(Some(DescendOrigin(sender.clone().try_into().unwrap())) .into_iter() @@ -341,16 +342,16 @@ fn send_fails_when_xcm_router_blocks() { ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let message = Xcm(vec![ + let message = Xcm::<()>(vec![ ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), buy_execution((Parent, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, ]); assert_noop!( - XcmPallet::send( + XcmPallet::send_blob( RuntimeOrigin::signed(ALICE), Box::new(Location::ancestor(8).into()), - Box::new(VersionedXcm::from(message.clone())), + VersionedXcm::from(message.clone()).encode().try_into().unwrap(), ), crate::Error::::SendFailure 
 		);
@@ -371,13 +372,16 @@ fn execute_withdraw_to_deposit_works() {
 	let weight = BaseXcmWeight::get() * 3;
 	let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into();
 	assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE);
-	assert_ok!(XcmPallet::execute(
+	assert_ok!(XcmPallet::execute_blob(
 		RuntimeOrigin::signed(ALICE),
-		Box::new(VersionedXcm::from(Xcm(vec![
+		VersionedXcm::from(Xcm::<RuntimeCall>(vec![
 			WithdrawAsset((Here, SEND_AMOUNT).into()),
 			buy_execution((Here, SEND_AMOUNT)),
 			DepositAsset { assets: AllCounted(1).into(), beneficiary: dest },
-		]))),
+		]))
+		.encode()
+		.try_into()
+		.unwrap(),
 		weight
 	));
 	assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT);
@@ -399,18 +403,21 @@ fn trapped_assets_can_be_claimed() {
 		let weight = BaseXcmWeight::get() * 6;
 		let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into();

-		assert_ok!(XcmPallet::execute(
+		assert_ok!(XcmPallet::execute_blob(
 			RuntimeOrigin::signed(ALICE),
-			Box::new(VersionedXcm::from(Xcm(vec![
+			VersionedXcm::from(Xcm(vec![
 				WithdrawAsset((Here, SEND_AMOUNT).into()),
 				buy_execution((Here, SEND_AMOUNT)),
 				// Don't propagate the error into the result.
-				SetErrorHandler(Xcm(vec![ClearError])),
+				SetErrorHandler(Xcm::<RuntimeCall>(vec![ClearError])),
 				// This will make an error.
 				Trap(0),
 				// This would succeed, but we never get to it.
 				DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() },
-			]))),
+			]))
+			.encode()
+			.try_into()
+			.unwrap(),
 			weight
 		));
 		let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into();
@@ -437,13 +444,16 @@ fn trapped_assets_can_be_claimed() {
 		assert_eq!(trapped, expected);

 		let weight = BaseXcmWeight::get() * 3;
-		assert_ok!(XcmPallet::execute(
+		assert_ok!(XcmPallet::execute_blob(
 			RuntimeOrigin::signed(ALICE),
-			Box::new(VersionedXcm::from(Xcm(vec![
+			VersionedXcm::from(Xcm::<RuntimeCall>(vec![
 				ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() },
 				buy_execution((Here, SEND_AMOUNT)),
 				DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() },
-			]))),
+			]))
+			.encode()
+			.try_into()
+			.unwrap(),
 			weight
 		));

@@ -453,13 +463,16 @@ fn trapped_assets_can_be_claimed() {
 		// Can't claim twice.
 		assert_err_ignore_postinfo!(
-			XcmPallet::execute(
+			XcmPallet::execute_blob(
 				RuntimeOrigin::signed(ALICE),
-				Box::new(VersionedXcm::from(Xcm(vec![
+				VersionedXcm::from(Xcm::<RuntimeCall>(vec![
 					ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() },
 					buy_execution((Here, SEND_AMOUNT)),
 					DepositAsset { assets: AllCounted(1).into(), beneficiary: dest },
-				]))),
+				]))
+				.encode()
+				.try_into()
+				.unwrap(),
 				weight
 			),
 			Error::<Test>::LocalExecutionIncomplete
@@ -473,11 +486,13 @@ fn claim_assets_works() {
 	let balances = vec![(ALICE, INITIAL_BALANCE)];
 	new_test_ext_with_balances(balances).execute_with(|| {
 		// First trap some assets.
-		let trapping_program = Xcm::builder_unsafe().withdraw_asset((Here, SEND_AMOUNT)).build();
+		let trapping_program = Xcm::<RuntimeCall>::builder_unsafe()
+			.withdraw_asset((Here, SEND_AMOUNT))
+			.build();
 		// Even though assets are trapped, the extrinsic returns success.
- assert_ok!(XcmPallet::execute( + assert_ok!(XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::V4(trapping_program)), + VersionedXcm::V4(trapping_program).encode().try_into().unwrap(), BaseXcmWeight::get() * 2, )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); @@ -530,9 +545,9 @@ fn incomplete_execute_reverts_side_effects() { assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); let amount_to_send = INITIAL_BALANCE - ExistentialDeposit::get(); let assets: Assets = (Here, amount_to_send).into(); - let result = XcmPallet::execute( + let result = XcmPallet::execute_blob( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ + VersionedXcm::from(Xcm::(vec![ // Withdraw + BuyExec + Deposit should work WithdrawAsset(assets.clone()), buy_execution(assets.inner()[0].clone()), @@ -540,7 +555,10 @@ fn incomplete_execute_reverts_side_effects() { // Withdrawing once more will fail because of InsufficientBalance, and we expect to // revert the effects of the above instructions as well WithdrawAsset(assets), - ]))), + ])) + .encode() + .try_into() + .unwrap(), weight, ); // all effects are reverted and balances unchanged for either sender or receiver @@ -552,7 +570,7 @@ fn incomplete_execute_reverts_side_effects() { Err(sp_runtime::DispatchErrorWithPostInfo { post_info: frame_support::dispatch::PostDispatchInfo { actual_weight: Some( - as ExecuteControllerWeightInfo>::execute() + weight + <::WeightInfo>::execute_blob() + weight ), pays_fee: frame_support::dispatch::Pays::Yes, }, diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index ba8d726aecff4989417373fc9e14c174d5a5277f..e836486e86e3068b82c3f3a5905836c7704e09f7 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -48,6 +48,9 @@ mod tests; /// Maximum nesting level for XCM decoding. pub const MAX_XCM_DECODE_DEPTH: u32 = 8; +/// Maximum encoded size. +/// See `decoding_respects_limit` test for more reasoning behind this value. +pub const MAX_XCM_ENCODED_SIZE: u32 = 12402; /// A version of XCM. pub type Version = u32; @@ -443,6 +446,16 @@ impl IntoVersion for VersionedXcm { } } +impl IdentifyVersion for VersionedXcm { + fn identify_version(&self) -> Version { + match self { + Self::V2(_) => v2::VERSION, + Self::V3(_) => v3::VERSION, + Self::V4(_) => v4::VERSION, + } + } +} + impl From> for VersionedXcm { fn from(x: v2::Xcm) -> Self { VersionedXcm::V2(x) diff --git a/polkadot/xcm/src/v2/multilocation.rs b/polkadot/xcm/src/v2/multilocation.rs index 60aa1f6ceadf0fefb991660393dc0cdc91f07700..ac98da8d08c910160a099e92eeef3eacd7e56513 100644 --- a/polkadot/xcm/src/v2/multilocation.rs +++ b/polkadot/xcm/src/v2/multilocation.rs @@ -176,7 +176,7 @@ impl MultiLocation { } /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with - /// theoriginal value of `self` in case of overflow. + /// the original value of `self` in case of overflow. 
pub fn pushed_with_interior(self, new: Junction) -> result::Result { match self.interior.pushed_with(new) { Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }), diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index c588b924ac70842b72678d141a67e4279563f30a..18fe01ec8fa7da6eb1db44ce961098c00a2ac13b 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -205,7 +205,7 @@ impl MultiLocation { } /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with - /// theoriginal value of `self` in case of overflow. + /// the original value of `self` in case of overflow. pub fn pushed_with_interior( self, new: impl Into, diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs index 8eedeca011075c546a35963cbc7b87617173ca2d..9275bfdb94923cd38de4d3690aafe70f3ab4b952 100644 --- a/polkadot/xcm/src/v4/location.rs +++ b/polkadot/xcm/src/v4/location.rs @@ -210,7 +210,7 @@ impl Location { } /// Consumes `self` and returns a `Location` suffixed with `new`, or an `Err` with - /// theoriginal value of `self` in case of overflow. + /// the original value of `self` in case of overflow. pub fn pushed_with_interior( self, new: impl Into, diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 30ee485589a28191e61fc351eda0f91d2773d589..6635408282e4849861a62be955e52d2fc4dd1b87 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -1488,7 +1488,21 @@ mod tests { let encoded = big_xcm.encode(); assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - let nested_xcm = Xcm::<()>(vec![ + let mut many_assets = Assets::new(); + for index in 0..MAX_ITEMS_IN_ASSETS { + many_assets.push((GeneralIndex(index as u128), 1u128).into()); + } + + let full_xcm_pass = + Xcm::<()>(vec![ + TransferAsset { assets: many_assets, beneficiary: Here.into() }; + MAX_INSTRUCTIONS_TO_DECODE as usize + ]); + let encoded = full_xcm_pass.encode(); + assert_eq!(encoded.len(), 12402); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok()); + + let nested_xcm_fail = Xcm::<()>(vec![ DepositReserveAsset { assets: All.into(), dest: Here.into(), @@ -1496,10 +1510,10 @@ mod tests { }; (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize ]); - let encoded = nested_xcm.encode(); + let encoded = nested_xcm_fail.encode(); assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]); + let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm_fail); 64]); let encoded = even_more_nested_xcm.encode(); assert_eq!(encoded.len(), 342530); // This should not decode since the limit is 100 diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 10726b0f511909c5cc4509746c09b731d32b2faf..997ca99fb12c831ea63d7dbb199f2c029dabb495 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } sp-std = { path = "../../../substrate/primitives/std", 
default-features = false } diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 80411ab5a2246bf8c7ba96c215b685f5e90aadaf..b8923a4d5c6e88d9034536fa4dacf21c47e9108b 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -142,7 +142,7 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFrom /// In the above example, `AllowUnpaidExecutionFrom` appears once underneath /// `WithComputedOrigin`. This is in order to distinguish between messages which are notionally /// from a derivative location of `ParentLocation` but that just happened to be sent via -/// `ParentLocaction` rather than messages that were sent by the parent. +/// `ParentLocation` rather than messages that were sent by the parent. /// /// Similarly `AllowTopLevelPaidExecutionFrom` appears twice: once inside of `WithComputedOrigin` /// where we provide the list of origins which are derivative origins, and then secondly outside diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index ba2b1fb44b8eeb0916fdec24d97e36121bc18c8a..6bdde2a967de9195f62fa59544fc3c254b51796b 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -21,6 +21,7 @@ use frame_support::{ dispatch::{DispatchErrorWithPostInfo, WithPostDispatchInfo}, pallet_prelude::DispatchError, + parameter_types, BoundedVec, }; use sp_std::boxed::Box; use xcm::prelude::*; @@ -41,8 +42,12 @@ impl Controller f /// Weight functions needed for [`ExecuteController`]. pub trait ExecuteControllerWeightInfo { - /// Weight for [`ExecuteController::execute`] - fn execute() -> Weight; + /// Weight for [`ExecuteController::execute_blob`] + fn execute_blob() -> Weight; +} + +parameter_types! { + pub const MaxXcmEncodedSize: u32 = xcm::MAX_XCM_ENCODED_SIZE; } /// Execute an XCM locally, for a given origin. @@ -61,19 +66,19 @@ pub trait ExecuteController { /// # Parameters /// /// - `origin`: the origin of the call. - /// - `message`: the XCM program to be executed. + /// - `msg`: the encoded XCM to be executed, should be decodable as a [`VersionedXcm`] /// - `max_weight`: the maximum weight that can be consumed by the execution. - fn execute( + fn execute_blob( origin: Origin, - message: Box>, + message: BoundedVec, max_weight: Weight, ) -> Result; } /// Weight functions needed for [`SendController`]. pub trait SendControllerWeightInfo { - /// Weight for [`SendController::send`] - fn send() -> Weight; + /// Weight for [`SendController::send_blob`] + fn send_blob() -> Weight; } /// Send an XCM from a given origin. @@ -93,11 +98,11 @@ pub trait SendController { /// /// - `origin`: the origin of the call. /// - `dest`: the destination of the message. - /// - `msg`: the XCM to be sent. 
- fn send( + /// - `msg`: the encoded XCM to be sent, should be decodable as a [`VersionedXcm`] + fn send_blob( origin: Origin, dest: Box, - message: Box>, + message: BoundedVec, ) -> Result; } @@ -132,40 +137,40 @@ pub trait QueryController: QueryHandler { origin: Origin, timeout: Timeout, match_querier: VersionedLocation, - ) -> Result; + ) -> Result; } impl ExecuteController for () { type WeightInfo = (); - fn execute( + fn execute_blob( _origin: Origin, - _message: Box>, + _message: BoundedVec, _max_weight: Weight, ) -> Result { - Err(DispatchError::Other("ExecuteController::execute not implemented") + Err(DispatchError::Other("ExecuteController::execute_blob not implemented") .with_weight(Weight::zero())) } } impl ExecuteControllerWeightInfo for () { - fn execute() -> Weight { + fn execute_blob() -> Weight { Weight::zero() } } impl SendController for () { type WeightInfo = (); - fn send( + fn send_blob( _origin: Origin, _dest: Box, - _message: Box>, + _message: BoundedVec, ) -> Result { Ok(Default::default()) } } impl SendControllerWeightInfo for () { - fn send() -> Weight { + fn send_blob() -> Weight { Weight::zero() } } @@ -186,7 +191,7 @@ impl QueryController for () { _origin: Origin, _timeout: Timeout, _match_querier: VersionedLocation, - ) -> Result { + ) -> Result { Ok(Default::default()) } } diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index e2af8187136e8eba2b42d7d4a667a69830556032..46d0ad227bfdf8e5e77188165fc259b0c1aec585 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -43,7 +43,7 @@ pub use barriers::{ mod controller; pub use controller::{ - Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, + Controller, ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController, QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo, }; diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 6b466483cfad70e52e841b39ef44ebb7362b17cf..35b624b041539e3d89fdb98a56d0910d5033417f 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -88,7 +88,7 @@ impl< type Beneficiary = Beneficiary; type AssetKind = AssetKind; type Balance = u128; - type Id = Querier::QueryId; + type Id = QueryId; type Error = xcm::latest::Error; fn pay( diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs index 99a9dd5a6609fe59e5e5ac6af073d8031c41c14c..6516263f57a0936b45171ca262f4001b4315ee95 100644 --- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs @@ -309,3 +309,62 @@ fn suspension_should_work() { ); assert_eq!(r, Ok(())); } + +#[test] +fn allow_subscriptions_from_should_work() { + // allow only parent + AllowSubsFrom::set(vec![Location::parent()]); + + let valid_xcm_1 = Xcm::(vec![SubscribeVersion { + query_id: 42, + max_response_weight: Weight::from_parts(5000, 5000), + }]); + let valid_xcm_2 = Xcm::(vec![UnsubscribeVersion]); + let invalid_xcm_1 = Xcm::(vec![ + SetAppendix(Xcm(vec![])), + SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) }, + ]); + let invalid_xcm_2 = Xcm::(vec![ + SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) }, + SetTopic([0; 32]), + ]); + + let test_data = vec![ + ( + valid_xcm_1.clone(), + Parachain(1).into_location(), + // not allowed origin + Err(ProcessMessageError::Unsupported), 
+ ), + (valid_xcm_1, Location::parent(), Ok(())), + ( + valid_xcm_2.clone(), + Parachain(1).into_location(), + // not allowed origin + Err(ProcessMessageError::Unsupported), + ), + (valid_xcm_2, Location::parent(), Ok(())), + ( + invalid_xcm_1, + Location::parent(), + // invalid XCM + Err(ProcessMessageError::BadFormat), + ), + ( + invalid_xcm_2, + Location::parent(), + // invalid XCM + Err(ProcessMessageError::BadFormat), + ), + ]; + + for (mut message, origin, expected_result) in test_data { + let r = AllowSubscriptionsFrom::>::should_execute( + &origin, + message.inner_mut(), + Weight::from_parts(10, 10), + &mut props(Weight::zero()), + ); + assert_eq!(r, expected_result, "Failed for origin: {origin:?} and message: {message:?}"); + } +} diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 4bf347ea7713e91b301b8642e1c2d1c8121cee84..3d03ab054248d907c89fe0f7a9d2fbb21896ace2 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -38,7 +38,7 @@ pub use sp_std::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, fmt::Debug, }; -pub use xcm::latest::{prelude::*, Weight}; +pub use xcm::latest::{prelude::*, QueryId, Weight}; use xcm_executor::traits::{Properties, QueryHandler, QueryResponseStatus}; pub use xcm_executor::{ traits::{ @@ -414,7 +414,6 @@ pub struct TestQueryHandler(core::marker::PhantomData<(T, BlockN impl QueryHandler for TestQueryHandler { - type QueryId = u64; type BlockNumber = BlockNumber; type Error = XcmError; type UniversalLocation = T::UniversalLocation; @@ -423,7 +422,7 @@ impl QueryHandler responder: impl Into, _timeout: Self::BlockNumber, _match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { let query_id = 1; expect_response(query_id, responder.into()); query_id @@ -433,7 +432,7 @@ impl QueryHandler message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { let responder = responder.into(); let destination = Self::UniversalLocation::get() .invert_target(&responder) @@ -445,7 +444,7 @@ impl QueryHandler Ok(query_id) } - fn take_response(query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(query_id: QueryId) -> QueryResponseStatus { QUERIES .with(|q| { q.borrow().get(&query_id).and_then(|v| match v { @@ -460,7 +459,7 @@ impl QueryHandler } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(_id: Self::QueryId, _response: xcm::latest::Response) { + fn expect_response(_id: QueryId, _response: xcm::latest::Response) { // Unnecessary since it's only a test implementation } } diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index e11caf6282be70314d4245a413194f41c7e1082f..63d254a1067582d3359c6a7dc1b27102bd1e45e7 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -36,6 +36,7 @@ mod locking; mod origins; mod pay; mod querying; +mod routing; mod transacting; mod version_subscriptions; mod weight; diff --git a/polkadot/xcm/xcm-builder/src/tests/origins.rs b/polkadot/xcm/xcm-builder/src/tests/origins.rs index c717d1e2af8a2f09e910dbf83fe51e6b821a6872..b6a1a9f1052a4757e7596927e10e1af73bd7b187 100644 --- a/polkadot/xcm/xcm-builder/src/tests/origins.rs +++ b/polkadot/xcm/xcm-builder/src/tests/origins.rs @@ -80,8 +80,8 @@ fn universal_origin_should_work() { fn export_message_should_work() { // Bridge chain (assumed to be Relay) lets Parachain #1 have message execution for free. 
AllowUnpaidFrom::set(vec![[Parachain(1)].into()]); - // Local parachain #1 issues a transfer asset on Polkadot Relay-chain, transfering 100 Planck to - // Polkadot parachain #2. + // Local parachain #1 issues a transfer asset on Polkadot Relay-chain, transferring 100 Planck + // to Polkadot parachain #2. let expected_message = Xcm(vec![TransferAsset { assets: (Here, 100u128).into(), beneficiary: Parachain(2).into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/routing.rs b/polkadot/xcm/xcm-builder/src/tests/routing.rs new file mode 100644 index 0000000000000000000000000000000000000000..28117d647a086907dc490217d02d6cbd612c7b88 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/tests/routing.rs @@ -0,0 +1,95 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; +use frame_support::{assert_ok, traits::Everything}; +use xcm_executor::traits::Properties; + +fn props() -> Properties { + Properties { weight_credit: Weight::zero(), message_id: None } +} + +#[test] +fn trailing_set_topic_as_id_with_unique_topic_should_work() { + type AllowSubscriptions = AllowSubscriptionsFrom; + + // check the validity of XCM for the `AllowSubscriptions` barrier + let valid_xcm = Xcm::<()>(vec![SubscribeVersion { + query_id: 42, + max_response_weight: Weight::from_parts(5000, 5000), + }]); + assert_eq!( + AllowSubscriptions::should_execute( + &Location::parent(), + valid_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props(), + ), + Ok(()) + ); + + // simulate sending `valid_xcm` with the `WithUniqueTopic` router + let mut sent_xcm = sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(send_xcm::>(Location::parent(), valid_xcm,)); + sent_xcm() + }); + assert_eq!(1, sent_xcm.len()); + + // `sent_xcm` should contain `SubscribeVersion` and have `SetTopic` added + let mut sent_xcm = sent_xcm.remove(0).1; + let _ = sent_xcm + .0 + .matcher() + .assert_remaining_insts(2) + .expect("two instructions") + .match_next_inst(|instr| match instr { + SubscribeVersion { .. } => Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction `SubscribeVersion`") + .match_next_inst(|instr| match instr { + SetTopic(..) 
=> Ok(()), + _ => Err(ProcessMessageError::BadFormat), + }) + .expect("expected instruction `SetTopic`"); + + // `sent_xcm` contains `SetTopic` and is now invalid for `AllowSubscriptions` + assert_eq!( + AllowSubscriptions::should_execute( + &Location::parent(), + sent_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props(), + ), + Err(ProcessMessageError::BadFormat) + ); + + // let's apply `TrailingSetTopicAsId` before `AllowSubscriptions` + let mut props = props(); + assert!(props.message_id.is_none()); + + // should pass, and the `message_id` is set + assert_eq!( + TrailingSetTopicAsId::::should_execute( + &Location::parent(), + sent_xcm.clone().inner_mut(), + Weight::from_parts(10, 10), + &mut props, + ), + Ok(()) + ); + assert!(props.message_id.is_some()); +} diff --git a/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs b/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs index e29e3a546615b3aa725d30418e2e173fe67db920..01047fde989f9720f410c9067bf3bffd3cac9880 100644 --- a/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs +++ b/polkadot/xcm/xcm-builder/src/tests/version_subscriptions.rs @@ -27,16 +27,32 @@ fn simple_version_subscriptions_should_work() { ]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(20, 20); - let r = XcmExecutor::::prepare_and_execute( - origin, - message, - &mut hash, - weight_limit, - Weight::zero(), + + // this case fails because the origin is not allowed + assert_eq!( + XcmExecutor::::prepare_and_execute( + origin, + message.clone(), + &mut hash, + weight_limit, + Weight::zero(), + ), + Outcome::Error { error: XcmError::Barrier } + ); + + // this case fails because the additional `SetAppendix` instruction is not allowed in the + // `AllowSubscriptionsFrom` + assert_eq!( + XcmExecutor::::prepare_and_execute( + Parent, + message, + &mut hash, + weight_limit, + Weight::zero(), + ), + Outcome::Error { error: XcmError::Barrier } ); - assert_eq!(r, Outcome::Error { error: XcmError::Barrier }); - let origin = Parachain(1000); let message = Xcm::(vec![SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000), diff --git a/polkadot/xcm/xcm-builder/tests/scenarios.rs b/polkadot/xcm/xcm-builder/tests/scenarios.rs index db37f85acdbbac2edfaa278d33ac7654b0efcee5..ee1aeffbb4e71d5c42cb92376395961d4d95de81 100644 --- a/polkadot/xcm/xcm-builder/tests/scenarios.rs +++ b/polkadot/xcm/xcm-builder/tests/scenarios.rs @@ -132,7 +132,7 @@ fn report_holding_works() { assets: AllCounted(1).into(), beneficiary: OnlyChild.into(), // invalid destination }, - // is not triggered becasue the deposit fails + // is not triggered because the deposit fails ReportHolding { response_info: response_info.clone(), assets: All.into() }, ]); let mut hash = fake_message_hash(&message); diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 71bd58073db6d7247cf5048d4a684aa19ea79bfd..aebc768bb906fd38d372d6d735a9e6bc0390ae9b 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -13,7 +13,7 @@ workspace = true impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = 
"..", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index 1e572e6210a27a7469f827bbe5d076c01c84f032..9c9c53f0ee1ba497f044477ec857afb6774a1f05 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -14,7 +14,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.6.1" } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system" } -futures = "0.3.21" +futures = "0.3.30" pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } pallet-xcm = { path = "../../pallet-xcm" } polkadot-test-client = { path = "../../../node/test/client" } diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index da7fc0d97825f24e1eea91e441cecef11a136e5f..1d1ee40d092c6d251b200439971986b9e0d6abd7 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -328,7 +328,7 @@ fn query_response_elicits_handler() { /// Simulates a cross-chain message from Parachain to Parachain through Relay Chain /// that deposits assets into the reserve of the destination. -/// Regression test for `DepostiReserveAsset` changes in +/// Regression test for `DepositReserveAsset` changes in /// #[test] fn deposit_reserve_asset_works_for_any_xcm_sender() { diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs index 952bd2d0040aca89f21d7a1eabbb6667ccb8a68c..1049bacdca5f9eee964cf810835c69b7ba1ba1ea 100644 --- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs +++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs @@ -16,11 +16,8 @@ use crate::{Junctions::Here, Xcm}; use core::result; -use frame_support::{ - pallet_prelude::{Get, TypeInfo}, - parameter_types, -}; -use parity_scale_codec::{Decode, Encode, FullCodec, MaxEncodedLen}; +use frame_support::{pallet_prelude::Get, parameter_types}; +use parity_scale_codec::{Decode, Encode}; use sp_arithmetic::traits::Zero; use sp_std::fmt::Debug; use xcm::latest::{ @@ -115,15 +112,6 @@ pub enum QueryResponseStatus { /// Provides methods to expect responses from XCMs and query their status. pub trait QueryHandler { - type QueryId: From - + FullCodec - + MaxEncodedLen - + TypeInfo - + Clone - + Eq - + PartialEq - + Debug - + Copy; type BlockNumber: Zero + Encode; type Error; type UniversalLocation: Get; @@ -151,14 +139,14 @@ pub trait QueryHandler { message: &mut Xcm<()>, responder: impl Into, timeout: Self::BlockNumber, - ) -> result::Result; + ) -> result::Result; /// Attempt to remove and return the response of query with ID `query_id`. - fn take_response(id: Self::QueryId) -> QueryResponseStatus; + fn take_response(id: QueryId) -> QueryResponseStatus; /// Makes sure to expect a response with the given id. #[cfg(feature = "runtime-benchmarks")] - fn expect_response(id: Self::QueryId, response: Response); + fn expect_response(id: QueryId, response: Response); } parameter_types! { @@ -168,17 +156,16 @@ parameter_types! 
{ impl QueryHandler for () { type BlockNumber = u64; type Error = (); - type QueryId = u64; type UniversalLocation = UniversalLocation; - fn take_response(_query_id: Self::QueryId) -> QueryResponseStatus { + fn take_response(_query_id: QueryId) -> QueryResponseStatus { QueryResponseStatus::NotFound } fn new_query( _responder: impl Into, _timeout: Self::BlockNumber, _match_querier: impl Into, - ) -> Self::QueryId { + ) -> QueryId { 0u64 } @@ -186,10 +173,10 @@ impl QueryHandler for () { _message: &mut Xcm<()>, _responder: impl Into, _timeout: Self::BlockNumber, - ) -> Result { + ) -> Result { Err(()) } #[cfg(feature = "runtime-benchmarks")] - fn expect_response(_id: Self::QueryId, _response: crate::Response) {} + fn expect_response(_id: QueryId, _response: crate::Response) {} } diff --git a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs index 449e82b5a6e2c3ecaf43b35608222b88ae6c8bcc..e76d56bfe6164d16487f65ed79c1a1a58a9949fe 100644 --- a/polkadot/xcm/xcm-executor/src/traits/should_execute.rs +++ b/polkadot/xcm/xcm-executor/src/traits/should_execute.rs @@ -18,7 +18,7 @@ use frame_support::traits::ProcessMessageError; use sp_std::result::Result; use xcm::latest::{Instruction, Location, Weight, XcmHash}; -/// Properyies of an XCM message and its imminent execution. +/// Properties of an XCM message and its imminent execution. #[derive(Clone, Eq, PartialEq, Debug)] pub struct Properties { /// The amount of weight that the system has determined this @@ -33,9 +33,9 @@ pub struct Properties { /// Trait to determine whether the execution engine should actually execute a given XCM. /// /// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns -/// `Ok()`, the execution stops. Else, `Err(_)` is returned if all elements reject the message. +/// `Ok(())`, the execution stops. Else, `Err(_)` is returned if all elements reject the message. pub trait ShouldExecute { - /// Returns `true` if the given `message` may be executed. + /// Returns `Ok(())` if the given `message` may be executed. /// /// - `origin`: The origin (sender) of the message. /// - `instructions`: The message itself. 
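A quick illustration of the `QueryHandler` refactor above (an editorial sketch, not part of the diff): with the associated `QueryId` type removed from the trait, downstream generic code uses the plain `QueryId` alias (a `u64`) re-exported from `xcm::latest`, as the mock change earlier in this diff also shows. The `poll_response` helper below is hypothetical.

```rust
use xcm::latest::{QueryId, Response};
use xcm_executor::traits::{QueryHandler, QueryResponseStatus};

// Previously the ID type had to be spelled `<H as QueryHandler>::QueryId`;
// after the refactor all handlers share the one `QueryId` alias directly.
fn poll_response<H: QueryHandler>(id: QueryId) -> Option<Response> {
	match H::take_response(id) {
		QueryResponseStatus::Ready { response, .. } => Some(response),
		// `Pending`, `NotFound` and `UnexpectedVersion`: nothing to take yet.
		_ => None,
	}
}
```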
diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..30c7c0bac14f7936d57ec5c86f94bc24ba17013f
--- /dev/null
+++ b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "xcm-fee-payment-runtime-api"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license = "Apache-2.0"
+repository.workspace = true
+description = "XCM fee payment runtime API"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+	"derive",
+] }
+
+sp-api = { path = "../../../substrate/primitives/api", default-features = false }
+scale-info = { version = "2.11.1", default-features = false, features = [
+	"derive",
+	"serde",
+] }
+sp-std = { path = "../../../substrate/primitives/std", default-features = false }
+sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false }
+sp-weights = { path = "../../../substrate/primitives/weights", default-features = false }
+xcm = { package = "staging-xcm", path = "../", default-features = false }
+frame-support = { path = "../../../substrate/frame/support", default-features = false }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-support/std",
+	"scale-info/std",
+	"sp-api/std",
+	"sp-runtime/std",
+	"sp-std/std",
+	"sp-weights/std",
+	"xcm/std",
+]
diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..20bf9236f1fbbb46ca50a1b42d6dc4e519a5ae1a
--- /dev/null
+++ b/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs
@@ -0,0 +1,99 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Runtime API definition for XCM transaction payment.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use codec::{Decode, Encode};
+use frame_support::pallet_prelude::TypeInfo;
+use sp_std::vec::Vec;
+use sp_weights::Weight;
+use xcm::{Version, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm};
+
+sp_api::decl_runtime_apis! {
+	/// A trait for the XCM payment API.
+	///
+	/// The API provides functionality for obtaining:
+	///
+	/// * the weight required to execute an XCM message,
+	/// * a list of acceptable `AssetId`s for message execution payment,
+	/// * the cost of the weight in the specified acceptable `AssetId`,
+	/// * the fees for an XCM message delivery.
+	///
+	/// To determine the execution weight of the calls required for the
+	/// [`xcm::latest::Instruction::Transact`] instruction, `TransactionPaymentCallApi` can be used.
+	pub trait XcmPaymentApi {
+		/// Returns a list of acceptable payment assets.
+		///
+		/// # Arguments
+		///
+		/// * `xcm_version`: Version.
+		fn query_acceptable_payment_assets(xcm_version: Version) -> Result<Vec<VersionedAssetId>, Error>;
+
+		/// Returns the weight needed to execute an XCM.
+		///
+		/// # Arguments
+		///
+		/// * `message`: `VersionedXcm`.
+		fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, Error>;
+
+		/// Converts a weight into a fee for the specified `AssetId`.
+		///
+		/// # Arguments
+		///
+		/// * `weight`: convertible `Weight`.
+		/// * `asset`: `VersionedAssetId`.
+		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, Error>;
+
+		/// Get delivery fees for sending a specific `message` to a `destination`.
+		/// These always come in a specific asset, defined by the chain.
+		///
+		/// # Arguments
+		/// * `message`: The message that'll be sent, necessary because most delivery fees are based on the
+		///   size of the message.
+		/// * `destination`: The destination to send the message to. Different destinations may use
+		///   different senders that charge different fees.
+		fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, Error>;
+	}
+}
+
+#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)]
+pub enum Error {
+	/// An API part is unsupported.
+	#[codec(index = 0)]
+	Unimplemented,
+
+	/// Converting a versioned data structure from one version to another failed.
+	#[codec(index = 1)]
+	VersionedConversionFailed,
+
+	/// XCM message weight calculation failed.
+	#[codec(index = 2)]
+	WeightNotComputable,
+
+	/// XCM version not able to be handled.
+	#[codec(index = 3)]
+	UnhandledXcmVersion,
+
+	/// The given asset is not handled as a fee asset.
+	#[codec(index = 4)]
+	AssetNotFound,
+
+	/// Destination is known to be unroutable.
+	#[codec(index = 5)]
+	Unroutable,
+}
diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml
index af471df60aba4dbc4d03692e7bfb84d99eb16420..0e13a10a14106c92d56d288d210b57f3888068b2 100644
--- a/polkadot/xcm/xcm-simulator/example/Cargo.toml
+++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true

 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1" }
-scale-info = { version = "2.10.0", features = ["derive"] }
+scale-info = { version = "2.11.1", features = ["derive"] }
 log = { workspace = true }

 frame-system = { path = "../../../../substrate/frame/system" }
diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs
index d134957fbc1c58a2fa59cfb73860d93d7ba74d5b..56e204bf5718164385e3da924f75ae98c2fcb075 100644
--- a/polkadot/xcm/xcm-simulator/example/src/lib.rs
+++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs
@@ -250,12 +250,13 @@ mod tests {
 		let withdraw_amount = 123;

 		Relay::execute_with(|| {
-			assert_ok!(RelayChainPalletXcm::reserve_transfer_assets(
+			assert_ok!(RelayChainPalletXcm::limited_reserve_transfer_assets(
 				relay_chain::RuntimeOrigin::signed(ALICE),
 				Box::new(Parachain(1).into()),
 				Box::new(AccountId32 { network: None, id: ALICE.into() }.into()),
 				Box::new((Here, withdraw_amount).into()),
 				0,
+				Unlimited,
 			));
 			assert_eq!(
 				relay_chain::Balances::free_balance(&child_account_id(1)),
@@ -424,7 +425,7 @@ mod tests {
 	/// Scenario:
 	/// The relay-chain transfers an NFT into a parachain's sovereign account, who then mints a
-	/// trustless-backed-derivated locally.
+	/// trustless-backed derivative locally.
 	///
 	/// Asserts that the parachain accounts are updated as expected.
#[test] @@ -479,7 +480,7 @@ mod tests { assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); }); ParaA::execute_with(|| { - log::debug!(target: "xcm-exceutor", "Hello"); + log::debug!(target: "xcm-executor", "Hello"); assert_eq!( parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()), Some(ALICE), diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index 377c77f30a473d178b5fa3c56a79abe7e2c2766a..286d0038e187a45504e9b1092fed4b152ffe9d3d 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -275,6 +275,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); type MessageProcessor = MessageProcessor; type QueueChangeHandler = (); type QueuePausedQuery = (); diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index 30644dc0e0a53fe485261f9d0e52533ba0641603..ca794a07bfb0c73df251d49c2e08a970ea5cf994 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -14,7 +14,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.6.1" } honggfuzz = "0.5.55" arbitrary = "1.3.2" -scale-info = { version = "2.10.0", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } frame-system = { path = "../../../../substrate/frame/system" } frame-support = { path = "../../../../substrate/frame/support" } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 3224df66cbe56fd50ecdc5931274f41332143134..6790b535d169220a3def3e4780af1aa1ef83e759 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -232,6 +232,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); #[cfg(not(feature = "runtime-benchmarks"))] type MessageProcessor = MessageProcessor; #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl index 62d5a9768f9ebd0b6eee8e6ef92dae7735fbf791..d1ed0250d4da0a5576c8baa7bfd746f6562bf804 100644 --- a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl +++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl @@ -21,9 +21,9 @@ honest: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} honest: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 100 seconds # Check lag - approval -honest: reports polkadot_parachain_approval_checking_finality_lag is 0 +honest: reports polkadot_parachain_approval_checking_finality_lag is lower than 2 # Check lag - dispute conclusion -honest: reports polkadot_parachain_disputes_finality_lag is 0 +honest: reports polkadot_parachain_disputes_finality_lag is lower than 2 diff --git a/prdoc/.template.prdoc b/prdoc/.template.prdoc index 097741f388c4f2be73c9aed9d2b1dcc7dde32cf2..03a458876dfaaab5f9bc2fe61ab349c0903988e1 100644 --- a/prdoc/.template.prdoc +++ 
b/prdoc/.template.prdoc
@@ -4,7 +4,7 @@ title: ...

 doc:
-  - audience: Node Dev
+  - audience: ...
     description: |
       ...
diff --git a/prdoc/pr_3246.prdoc b/prdoc/pr_3246.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..19a823e502855e6190da1dd767c04b50027707db
--- /dev/null
+++ b/prdoc/pr_3246.prdoc
@@ -0,0 +1,11 @@
+title: Try State Hook for Beefy.
+
+doc:
+  - audience: Runtime User
+    description: |
+      Invariants for storage items in the beefy pallet. Enforces the following invariants:
+      1. `Authorities` should not exceed the `MaxAuthorities` capacity.
+      2. `NextAuthorities` should not exceed the `MaxAuthorities` capacity.
+      3. `ValidatorSetId` must be present in `SetIdSession`.
+crates:
+- name: pallet-beefy
diff --git a/prdoc/pr_3341.prdoc b/prdoc/pr_3341.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..de714fa5a1e5e2407dcb38ede6be9bb5761d35f1
--- /dev/null
+++ b/prdoc/pr_3341.prdoc
@@ -0,0 +1,18 @@
+title: "Fix `schedule_code_upgrade` when called by the owner/root"
+
+doc:
+  - audience: Runtime User
+    description: |
+      Fixes `schedule_code_upgrade` when being used by the owner/root. The call is used for
+      manually upgrading the validation code of a parachain on the relay chain. It was failing
+      before because the relay chain waited for the parachain to make progress. However, this
+      call is mostly used when a parachain is bricked, which means that it is no longer able
+      to build any blocks. The fix is to schedule the validation code upgrade and then
+      to enact it at the scheduled block. The enactment now happens without requiring the parachain
+      to make any progress.
+
+crates:
+  - name: polkadot-runtime-common
+    bump: minor
+  - name: polkadot-runtime-parachains
+    bump: major
diff --git a/prdoc/pr_3438.prdoc b/prdoc/pr_3438.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..5f4a0e3d57af7a606d6c59dd049008dad03b944a
--- /dev/null
+++ b/prdoc/pr_3438.prdoc
@@ -0,0 +1,13 @@
+title: "Pools: Make PermissionlessWithdraw the default claim permission"
+
+doc:
+  - audience: Runtime User
+    description: |
+      Makes permissionless withdrawing the default claim permission, giving any network participant
+      access to claim pool rewards on members' behalf, by default.
+
+crates:
+  - name: pallet-nomination-pools
+    bump: minor
+  - name: pallet-nomination-pools-benchmarking
+    bump: minor
diff --git a/prdoc/pr_3607.prdoc b/prdoc/pr_3607.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1a69b25ad255ca1d2e2f717a75db763360e7f4fc
--- /dev/null
+++ b/prdoc/pr_3607.prdoc
@@ -0,0 +1,26 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "XCM fee payment API"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      A runtime API was added for estimating the fees required for XCM execution and delivery.
+      This is the basic building block needed for UIs to accurately estimate fees.
+      An example implementation is shown in the PR. Ideally it's simple to implement: you only need to call existing parts of your XCM config.
+ The API looks like so: + ```rust + fn query_acceptable_payment_assets(xcm_version: Version) -> Result<Vec<VersionedAssetId>, Error>; + fn query_xcm_weight(message: VersionedXcm<Call>) -> Result<Weight, Error>; + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, Error>; + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, Error>; + ``` + The first three relate to XCM execution fees: given an XCM, you can query its weight, then query which assets are acceptable for buying weight, and convert that weight to an amount of those assets. + The last one takes in a destination and a message you want to send from the runtime you're executing this on; it will give you the delivery fees. + +crates: + - name: xcm-fee-payment-runtime-api + - name: rococo-runtime + - name: westend-runtime + diff --git a/prdoc/pr_3706.prdoc b/prdoc/pr_3706.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..edeb08241bed150f755052ccd3879f3a05e134a2 --- /dev/null +++ b/prdoc/pr_3706.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Extrinsic to restore corrupted staking ledgers + +doc: + - audience: Runtime User + description: | + This PR adds a new extrinsic `Call::restore_ledger` gated by `StakingAdmin` origin that restores a corrupted staking ledger. This extrinsic will be used to recover ledgers that were affected by the issue discussed in https://github.com/paritytech/polkadot-sdk/issues/3245. + The extrinsic will re-write the storage items associated with a stash account provided as an input parameter. The data used to reset the ledger can be either i) fetched on-chain or ii) partially/totally set by the input parameters of the call. + + Changes introduced: + - Adds `Call::restore_ledger` extrinsic to recover a corrupted ledger; + - Adds trait `frame_support::traits::currency::InspectLockableCurrency` to allow external pallets to read current locks given an account and lock ID; + - Implements `InspectLockableCurrency` in pallet-balances. + - Adds staking locks try-runtime checks (https://github.com/paritytech/polkadot-sdk/issues/3751) + +crates: + - name: pallet-staking + - name: pallet-balances diff --git a/prdoc/pr_3714.prdoc b/prdoc/pr_3714.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e276d0d2d3731c2cf626a54f4812b5167269bca6 --- /dev/null +++ b/prdoc/pr_3714.prdoc @@ -0,0 +1,10 @@ +title: Handle legacy lease swaps on coretime + +doc: + - audience: Runtime Dev + description: | + When a `registrar::swap` extrinsic is executed, it swaps two leases on the relay chain, but the + broker chain never knows about this swap. This change notifies the broker chain via an XCM + message about the swap so that it can update its state.
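A hedged sketch of how a client might chain the four fee-payment API calls above into one total-cost estimate. The `XcmFeeApi` trait and `FeeApiError` type are stand-ins for the real runtime-api plumbing (not shown in this diff); only the call order and data flow come from the description above.

```rust
use sp_weights::Weight;
use xcm::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm};

/// Stand-in for whatever error type the runtime API surfaces.
#[derive(Debug)]
pub struct FeeApiError;

/// Hypothetical wrapper around the runtime API sketched above.
pub trait XcmFeeApi {
    fn query_xcm_weight(&self, message: &VersionedXcm<()>) -> Result<Weight, FeeApiError>;
    fn query_weight_to_asset_fee(&self, weight: Weight, asset: VersionedAssetId) -> Result<u128, FeeApiError>;
    fn query_delivery_fees(&self, dest: VersionedLocation, message: &VersionedXcm<()>) -> Result<VersionedAssets, FeeApiError>;
}

/// Estimate the execution fee (denominated in `asset`) plus delivery fees to `dest`.
pub fn estimate_total_cost<A: XcmFeeApi>(
    api: &A,
    message: &VersionedXcm<()>,
    dest: VersionedLocation,
    asset: VersionedAssetId,
) -> Result<(u128, VersionedAssets), FeeApiError> {
    let weight = api.query_xcm_weight(message)?; // weight of executing the program
    let execution_fee = api.query_weight_to_asset_fee(weight, asset)?; // weight -> asset amount
    let delivery_fees = api.query_delivery_fees(dest, message)?; // transport cost to `dest`
    Ok((execution_fee, delivery_fees))
}
```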
+crates: + - name: pallet-broker diff --git a/prdoc/pr_3718.prdoc b/prdoc/pr_3718.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..b2b24cc9704d9a7cb66bb63bf2c8483c4b1f4490 --- /dev/null +++ b/prdoc/pr_3718.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Deprecate scheduler traits v1 and v2 + +doc: + - audience: Runtime Dev + description: | + Add the `#[deprecated]` attribute to scheduler traits v1 and v2 to deprecate the old versions. + +crates: + - name: frame-support + - name: pallet-scheduler diff --git a/prdoc/pr_3738.prdoc b/prdoc/pr_3738.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..cbf19b95c36a0bedf06584552250d4419c712e42 --- /dev/null +++ b/prdoc/pr_3738.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from `pallet-alliance` + +doc: + - audience: Runtime Dev + description: | + This PR removes `pallet::getter` usage from `pallet-alliance`, and updates dependent code accordingly. + The syntax `StorageItem::<T, I>::get()` should be used instead. + +crates: + - name: pallet-alliance + bump: major diff --git a/prdoc/pr_3749.prdoc b/prdoc/pr_3749.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..1ebde9670e0f374c8dd079f9d66ec84894b71f6b --- /dev/null +++ b/prdoc/pr_3749.prdoc @@ -0,0 +1,47 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "pallet-xcm: deprecate execute and send in favor of execute_blob and send_blob" + +doc: + - audience: Runtime Dev + description: | + pallet-xcm's extrinsics `execute` and `send` have been marked as deprecated. + Please change their usage to the new `execute_blob` and `send_blob`. + The migration from the old extrinsics to the new ones is very simple. + If you have your message `xcm: VersionedXcm`, then instead of passing in + `Box::new(xcm)` to both `execute` and `send`, you would pass in + `xcm.encode().try_into()` and handle the potential error of its encoded length + being bigger than `MAX_XCM_ENCODED_SIZE`. + + pallet-contracts now takes the encoded XCM as well. It follows the same API as + `execute_blob` and `send_blob`. + - audience: Runtime User + description: | + pallet-xcm has a new pair of extrinsics, `execute_blob` and `send_blob`. + These are meant to be used instead of `execute` and `send`, which are now deprecated + and will be removed eventually. + These new extrinsics just require you to input the encoded XCM. + There's a new utility in PolkadotJS Apps for encoding XCMs you can use: + https://polkadot.js.org/apps/#/utilities/xcm + Just pass in the encoded XCM to the new extrinsics and you're done. + + pallet-contracts now takes the encoded XCM as well. It follows the same API as + `execute_blob` and `send_blob`.
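The encoding step of this migration is small enough to sketch. The following is a hedged illustration only: `MAX_XCM_ENCODED_SIZE` is given a placeholder value here, and the real bound (and the bounded type expected by the extrinsics) lives in pallet-xcm.

```rust
use codec::Encode;
use xcm::VersionedXcm;

/// Placeholder value; check pallet-xcm for the actual constant.
const MAX_XCM_ENCODED_SIZE: u32 = 1024;

/// Encode an XCM for `execute_blob`/`send_blob`, rejecting oversized programs.
fn to_blob(xcm: &VersionedXcm<()>) -> Result<Vec<u8>, &'static str> {
    let encoded = xcm.encode();
    if encoded.len() > MAX_XCM_ENCODED_SIZE as usize {
        return Err("encoded XCM exceeds MAX_XCM_ENCODED_SIZE");
    }
    Ok(encoded)
}
```

Where the old call sites passed `Box::new(xcm)` to `execute` or `send`, the new ones pass this encoded blob instead.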
+ +crates: +- name: pallet-xcm +- name: staging-xcm +- name: staging-xcm-builder +- name: pallet-contracts +- name: asset-hub-rococo-runtime +- name: asset-hub-westend-runtime +- name: bridge-hub-rococo-runtime +- name: bridge-hub-westend-runtime +- name: collectives-westend-runtime +- name: coretime-rococo-runtime +- name: coretime-westend-runtime +- name: people-rococo-runtime +- name: people-westend-runtime +- name: rococo-runtime +- name: westend-runtime diff --git a/prdoc/pr_3761.prdoc b/prdoc/pr_3761.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..65b8c396fe3b62f6690a427e3e1ebaa1f7ee5204 --- /dev/null +++ b/prdoc/pr_3761.prdoc @@ -0,0 +1,25 @@ +title: "Snowbridge: Synchronize from Snowfork repository" + +doc: + - audience: Runtime Dev + description: | + This PR improves the beacon client to send the execution header along with the message as proof, and removes the verification and storing of all execution headers. + If the AH sovereign account is depleted and relayer rewards cannot be paid, the message should still be processed. + +crates: +- name: snowbridge-pallet-ethereum-client + bump: minor +- name: snowbridge-pallet-inbound-queue + bump: minor +- name: snowbridge-beacon-primitives + bump: minor +- name: snowbridge-core + bump: minor +- name: snowbridge-runtime-test-common + bump: minor +- name: asset-hub-rococo-runtime + bump: minor +- name: bridge-hub-rococo-runtime + bump: minor +- name: bridge-hub-rococo-integration-tests + bump: minor diff --git a/prdoc/pr_3792.prdoc b/prdoc/pr_3792.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..cbcdc29a9c6e434403b5d2548a44eb468e6ba30e --- /dev/null +++ b/prdoc/pr_3792.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-xcm] fix transport fees for remote reserve transfers" + +doc: + - audience: Runtime Dev + description: | + This PR fixes `pallet_xcm::transfer_assets` and + `pallet_xcm::limited_reserve_transfer_assets` extrinsics for transfers + that need to go through remote reserves. The fix is adding a + `SetFeesMode { jit_withdraw: true }` instruction before local execution of + `InitiateReserveWithdraw` so that delivery fees are correctly charged by + the xcm-executor. Without this change, a runtime that has implemented + delivery fees would not be able to execute remote reserve transfers using + these extrinsics. + +crates: + - name: pallet-xcm diff --git a/prdoc/pr_3795.prdoc b/prdoc/pr_3795.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..da01fcbec821cfb1ca391a374a8f166a05111be0 --- /dev/null +++ b/prdoc/pr_3795.prdoc @@ -0,0 +1,14 @@ +title: Enable collators to build on multiple cores + +doc: + - audience: Node Dev + description: | + Introduces a `CoreIndex` parameter in `SubmitCollationParams`. This enables + collators to make use of potentially multiple cores assigned at some relay + chain block. This extra parameter is used by the collator protocol and collation + generation subsystems to forward the collation to the appropriate backing group.
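As a hedged illustration of the new parameter: a collator building for a specific core now passes it along with the collation. The field names besides `core_index` are recalled from `polkadot-node-primitives` and may not match every version exactly; `relay_parent`, `collation`, `parent_head` and `validation_code_hash` are assumed to be in scope.

```rust
use polkadot_node_primitives::SubmitCollationParams;
use polkadot_primitives::CoreIndex;

// Assumed in scope: relay_parent, collation, parent_head, validation_code_hash.
let params = SubmitCollationParams {
    relay_parent,
    collation,
    parent_head,
    validation_code_hash,
    result_sender: None,
    core_index: CoreIndex(0), // new: the core this collation was built for
};
```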
+ +crates: +- name: polkadot-node-collation-generation +- name: polkadot-collator-protocol + bump: minor diff --git a/prdoc/pr_3808.prdoc b/prdoc/pr_3808.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..9b50f721df335891130f9ef63c0bf144792beebb --- /dev/null +++ b/prdoc/pr_3808.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix spelling mistakes in source code + +doc: + - audience: Node Operator + description: | + Some spelling mistakes in log output, error messages and tracing (prometheus/grafana) have been fixed. + - audience: Runtime Dev + description: | + Public crate changes: + - The public trait `RuntimeParameterStore` in `substrate/frame/support` had the associated type renamed from `AggregratedKeyValue` to `AggregatedKeyValue`. + - The public trait `AggregratedKeyValue` in `substrate/frame/support` was similarly renamed to `AggregatedKeyValue`. + - The public methods `test_versioning` and `test_versioning_register_only` of the `TestApi` trait in `substrate/primitives/runtime-interface/test-wasm` had the spelling of `versionning` changed to `versioning`. + - The public functions `read_trie_first_descendant_value` and `read_child_trie_first_descendant_value` in `substrate/primitives/trie` had the spelling of `descedant` changed to `descendant`. +crates: + - name: frame-support + - name: sp-runtime-interface-test-wasm + - name: sp-trie diff --git a/prdoc/pr_3817.prdoc b/prdoc/pr_3817.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..bf9d397122f9d98bf4853ed0895a74f764367e54 --- /dev/null +++ b/prdoc/pr_3817.prdoc @@ -0,0 +1,23 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Parachain Runtime API Implementations into mod apis Refactoring + +doc: + - audience: Runtime Dev + description: | + This PR introduces a refactoring to the runtime API implementations within the parachain template project. The primary changes include enhancing the visibility of `RUNTIME_API_VERSIONS` to `pub` in `impl_runtime_apis.rs`, centralizing API implementations in a new `apis.rs` file, and streamlining `lib.rs`. These changes aim to improve project structure, maintainability, and readability. + + Key Changes: + - `RUNTIME_API_VERSIONS` is now publicly accessible, enhancing module-wide visibility. + - Introduction of `apis.rs` centralizes runtime API implementations, promoting a cleaner and more navigable project structure. + - The main runtime library file, `lib.rs`, has been updated to reflect these structural changes, removing redundant API implementations and simplifying runtime configuration by pointing `VERSION` to the newly exposed `RUNTIME_API_VERSIONS` from `apis.rs`. + + Motivations: + - **Improved Project Structure**: Centralizing API implementations offers a more organized and understandable project layout. + - **Enhanced Readability**: The refactoring efforts aim to declutter `lib.rs`, facilitating easier comprehension for new contributors. 
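A hedged sketch of what this refactor looks like from the runtime's `lib.rs`; the field values are placeholders, and the point is only that `apis` now references the `RUNTIME_API_VERSIONS` constant exposed as `pub` from the new `apis.rs` module.

```rust
use sp_runtime::create_runtime_str;
use sp_version::RuntimeVersion;

mod apis; // runtime API implementations are centralized here

// `apis::RUNTIME_API_VERSIONS` is generated by `impl_runtime_apis!` in apis.rs;
// with its visibility raised to `pub`, `VERSION` can simply point at it.
pub const VERSION: RuntimeVersion = RuntimeVersion {
    spec_name: create_runtime_str!("parachain-template-runtime"),
    impl_name: create_runtime_str!("parachain-template-runtime"),
    authoring_version: 1,
    spec_version: 1,
    impl_version: 0,
    apis: apis::RUNTIME_API_VERSIONS,
    transaction_version: 1,
    state_version: 1,
};
```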
+ +crates: + - name: sp-api-proc-macro + - name: parachain-template-node + - name: parachain-template-runtime diff --git a/prdoc/pr_3835.prdoc b/prdoc/pr_3835.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d2f49f8fc1161da7f264ae2a55ebcfd462dcca27 --- /dev/null +++ b/prdoc/pr_3835.prdoc @@ -0,0 +1,54 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "migrations: prevent accidentally using inner unversioned migration instead of `VersionedMigration`" + +doc: + - audience: Runtime Dev + description: | + Currently, it is possible to accidentally use an inner unversioned migration instead of `VersionedMigration` + since both implement `OnRuntimeUpgrade`. With this change, we make it clear that `Inner` is not intended + to be used directly. It is achieved by bounding `Inner` to the new trait `UncheckedOnRuntimeUpgrade`, which + has the same interface as `OnRuntimeUpgrade`, but cannot be used directly for runtime upgrade migrations. + + This change will break all existing migrations passed to `VersionedMigration`. Developers should simply change + those migrations to implement `UncheckedOnRuntimeUpgrade` instead of `OnRuntimeUpgrade`. + + Example: + + ``` + --- a/path/to/migration.rs + +++ b/path/to/migration.rs + @@ -1,7 +1,7 @@ + -impl OnRuntimeUpgrade for MigrateVNToVM { + +impl UncheckedOnRuntimeUpgrade for MigrateVNToVM { + fn on_runtime_upgrade() -> Weight { + // Migration logic here + // Adjust the migration logic if necessary to align with the expectations + // of the new `UncheckedOnRuntimeUpgrade` trait. + Weight::zero() + } + } + ``` + +crates: + - name: "pallet-example-single-block-migrations" + bump: "major" + - name: "pallet-xcm" + bump: "major" + - name: "pallet-grandpa" + bump: "major" + - name: "pallet-identity" + bump: "major" + - name: "pallet-nomination-pools" + bump: "major" + - name: "pallet-society" + bump: "major" + - name: "frame-support" + bump: "major" + - name: "pallet-uniques" + bump: "major" + - name: "polkadot-runtime-parachains" + bump: "major" + - name: "polkadot-runtime-common" + bump: "major" diff --git a/prdoc/pr_3844.prdoc b/prdoc/pr_3844.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a92092f91b207e44957bccce59b25cdd983fdac4 --- /dev/null +++ b/prdoc/pr_3844.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add the ability for MessageQueue to process enqueued messages on idle + +doc: + - audience: Runtime Dev + description: | + Add the option to use remaining weight on idle for processing enqueued messages. + This increases the chances that messages enqueued during inherent extrinsics are processed in the same block. + A new config type is added to the message-queue `Config` trait: + - `IdleMaxServiceWeight` + + example: + ```rust + parameter_types!
{ + // The maximum weight to be used from remaining weight for processing enqueued messages on idle + pub const IdleMaxServiceWeight: Option<Weight> = Some(Weight::MAX); + } + + type IdleMaxServiceWeight = IdleMaxServiceWeight; // or `()` to not use this feature + ``` + +crates: + - name: pallet-message-queue diff --git a/prdoc/pr_3849.prdoc b/prdoc/pr_3849.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a1372b60ffc65150a7986d951a768b9e365b7f91 --- /dev/null +++ b/prdoc/pr_3849.prdoc @@ -0,0 +1,13 @@ +title: Unrequest a pre-image when it failed to execute + +doc: + - audience: Runtime User + description: | + When a referendum finishes, the proposal will be scheduled. When it is scheduled, + the pre-image is requested. The pre-image is unrequested after the proposal + was executed. However, if the proposal failed to execute, it wasn't unrequested + and thus could not be removed from the on-chain state. This issue is now solved + by unrequesting the pre-image when it fails to execute. + +crates: + - name: pallet-scheduler diff --git a/prdoc/pr_3850.prdoc b/prdoc/pr_3850.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..8f7ce16076e852853f6edc137b756c83f3a5d924 --- /dev/null +++ b/prdoc/pr_3850.prdoc @@ -0,0 +1,15 @@ +title: Detect incorrect pre-image length when submitting a referenda + +doc: + - audience: Runtime User + description: | + When submitting a referendum, the `proposal` is passed as an argument. + The `proposal` is most of the time a reference to a `pre-image`, + which also contains the length of the `pre-image`. This pull request + adds some logic to check whether the `pre-image` already exists and, + if it exists, ensures that the length is passed correctly. This prevents + the referendum from failing to execute because of a mismatch of this + length. + +crates: + - name: pallet-referenda diff --git a/prdoc/pr_3854.prdoc b/prdoc/pr_3854.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..cfc8e246d7e1fad7b736725a08e0ce465e224640 --- /dev/null +++ b/prdoc/pr_3854.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Export unified `ParachainHostFunctions` from `cumulus-client-service` + +doc: + - audience: Node Dev + description: | + Exports `ParachainHostFunctions` to have a bundled version of `SubstrateHostFunctions` and + `cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions`. This increases discoverability and makes + it more obvious that they should be used together in parachain nodes. + +crates: + - name: cumulus-client-service + bump: minor diff --git a/prdoc/pr_3927.prdoc b/prdoc/pr_3927.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a568636d0bd087030d94e67ee459df4993bc0046 --- /dev/null +++ b/prdoc/pr_3927.prdoc @@ -0,0 +1,13 @@ +title: "pallet-xcm: deprecate transfer extrinsics without weight limit" + +doc: + - audience: Runtime Dev + description: | + pallet-xcm's extrinsics `teleport_assets` and `reserve_transfer_assets` have been + marked as deprecated. Please change their usage to the `limited_teleport_assets` + and `limited_reserve_transfer_assets`, respectively; or use the generic/flexible + `transfer_assets` extrinsic.
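A hedged before/after sketch of this deprecation; `origin`, `dest`, `beneficiary`, `assets` and `fee_asset_item` are assumed call-site values (boxed where the pallet expects it), and `Unlimited` reproduces the old behaviour of not imposing a weight limit.

```rust
use xcm::prelude::WeightLimit;

// Before (now deprecated):
// PolkadotXcm::teleport_assets(origin, dest, beneficiary, assets, fee_asset_item)?;

// After: the limited variant takes an explicit weight limit.
PolkadotXcm::limited_teleport_assets(
    origin,
    dest,
    beneficiary,
    assets,
    fee_asset_item,
    WeightLimit::Unlimited, // matches the old, unlimited behaviour
)?;
```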
+ +crates: +- name: pallet-xcm + bump: minor diff --git a/prdoc/pr_3950.prdoc b/prdoc/pr_3950.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..a333521898bfa30cffa5c934351e72fd005a5843 --- /dev/null +++ b/prdoc/pr_3950.prdoc @@ -0,0 +1,12 @@ +title: Add `ClaimQueue` wrapper + +doc: + - audience: Node Dev + description: | + Introduces a new wrapper type: `ClaimQueueSnapshot`. It contains a snapshot of the `ClaimQueue` + at an arbitrary relay chain block. Two methods are exposed to allow access to the claims at + specific depths. + +crates: + - name: polkadot-node-subsystem-util + bump: minor diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json index 1bd0b3b93ee45ea75d9781ef1485898dd86c7ff3..593113cf3260049fa0a7e2476955dc046b9aca35 100644 --- a/prdoc/schema_user.json +++ b/prdoc/schema_user.json @@ -132,7 +132,8 @@ } }, "required": [ - "name" + "name", + "bump" ] }, "migration_db": { @@ -187,6 +188,11 @@ "const": "patch", "title": "Patch", "description": "A bump to the third leftmost non-zero digit of the version number." + }, + { + "const": "none", + "title": "None", + "description": "This change requires no SemVer bump (e.g. change was a test or benchmark)." + } ] }, diff --git a/scripts/bridges_update_subtree.sh b/scripts/bridges_update_subtree.sh index 5c5c7a322a163d5db9f7fa4e714c4951f296e1df..2cd6d968d2b24894cc5a8faec02c96618f258d4b 100755 --- a/scripts/bridges_update_subtree.sh +++ b/scripts/bridges_update_subtree.sh @@ -1,6 +1,6 @@ #!/bin/bash -# A script to udpate bridges repo as subtree to Cumulus +# A script to update bridges repo as subtree to Cumulus # Usage: # ./scripts/bridges_update_subtree.sh fetch # ./scripts/bridges_update_subtree.sh patch diff --git a/scripts/snowbridge_update_subtree.sh b/scripts/snowbridge_update_subtree.sh index 2276bb35469f1e13cd58085e36fa49069123bcef..c572eaa18faa8594d4457f5d87cd00dfb790292a 100755 --- a/scripts/snowbridge_update_subtree.sh +++ b/scripts/snowbridge_update_subtree.sh @@ -1,6 +1,6 @@ #!/bin/bash -# A script to udpate bridges repo as subtree to Cumulus +# A script to update bridges repo as subtree to Cumulus # Usage: # ./scripts/update_subtree_snowbridge.sh fetch # ./scripts/update_subtree_snowbridge.sh patch diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index d5de1dbe6c44e16980d79a75c54cec8ae42a4a7f..49485fe2a1b9b8017ec560adef2f0f2114b07b5d 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -44,4 +44,4 @@ lazy_static = "1.4.0" parity-db = "0.4.12" sc-transaction-pool = { path = "../../../client/transaction-pool" } sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 8bddbbe04828f19ae437db60c8a6f5480c4e847f..6346063b9d27c4e446e8f4ef24f7e573e7c2ae09 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -45,7 +45,7 @@ clap = { version = "4.5.3", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } rand = "0.8" @@ -129,7 +129,7 @@ sc-block-builder = { path = "../../../client/block-builder" }
sp-tracing = { path = "../../../primitives/tracing" } sp-blockchain = { path = "../../../primitives/blockchain" } sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } -futures = "0.3.21" +futures = "0.3.30" tempfile = "3.1.0" assert_cmd = "2.0.2" nix = { version = "0.26.1", features = ["signal"] } @@ -160,7 +160,7 @@ sp-externalities = { path = "../../../primitives/externalities" } sp-keyring = { path = "../../../primitives/keyring" } sp-runtime = { path = "../../../primitives/runtime" } serde_json = { workspace = true, default-features = true } -scale-info = { version = "2.10.0", features = ["derive", "serde"] } +scale-info = { version = "2.11.1", features = ["derive", "serde"] } sp-trie = { path = "../../../primitives/trie" } sp-state-machine = { path = "../../../primitives/state-machine" } diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 23a62cc0bd243ebd1c8ade712b3ded2361f9e4c0..d04780d5f9535791f5e904bbc89405355cbbd0ce 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -145,7 +145,7 @@ fn prepare_benchmark(client: &FullClient) -> (usize, Vec) { let src = Sr25519Keyring::Alice.pair(); let dst: MultiAddress = Sr25519Keyring::Bob.to_account_id().into(); - // Add as many tranfer extrinsics as possible into a single block. + // Add as many transfer extrinsics as possible into a single block. for nonce in 0.. { let extrinsic: OpaqueExtrinsic = create_extrinsic( client, @@ -179,7 +179,7 @@ fn block_production(c: &mut Criterion) { let node = new_node(tokio_handle.clone()); let client = &*node.client; - // Buliding the very first block is around ~30x slower than any subsequent one, + // Building the very first block is around ~30x slower than any subsequent one, // so let's make sure it's built and imported before we benchmark anything. let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(client.chain_info().best_hash) diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index 3345afae4fd2b181f147fd6fb6d5597d36db7885..56fbed51f8a453b702349f815bc58a07abbd0370 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -49,7 +49,7 @@ pub struct Cli { /// Possible subcommands of the main binary. #[derive(Debug, clap::Subcommand)] pub enum Subcommand { - /// The custom inspect subcommmand for decoding blocks and extrinsics. + /// The custom inspect subcommand for decoding blocks and extrinsics. #[command( name = "inspect", about = "Decode given block or extrinsic using current native runtime." diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index e4b425e6f96342ee02dd46e0ea31d7a26790677b..d6e2a29d30b8ab99f45890424e99c60c26342151 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -53,7 +53,7 @@ pub type HostFunctions = ( frame_benchmarking::benchmarking::HostFunctions, ); -/// A specialized `WasmExecutor` intended to use accross substrate node. It provides all required +/// A specialized `WasmExecutor` intended to use across substrate node. It provides all required /// HostFunctions. 
pub type RuntimeExecutor = sc_executor::WasmExecutor<HostFunctions>; @@ -652,6 +652,7 @@ pub fn new_full_base( prometheus_registry: prometheus_registry.clone(), links: beefy_links, on_demand_justifications_handler: beefy_on_demand_justifications_handler, + is_authority: role.is_authority(), }; let beefy_gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 8c7b3c873157770a8f156a019aa7e43e66460bae..69c96bf63a6d8d7b7c24746cdf5c3c48b526c12e 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -135,7 +135,7 @@ fn transaction_fee_is_correct() { // if weight of the cheapest weight would be 10^7, this would be 10^9, which is: // - 1 MILLICENTS in substrate node. // - 1 milli-dot based on current polkadot runtime. - // (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) + // (this is based on assigning 0.1 CENT to the cheapest tx with `weight = 100`) let mut t = new_test_ext(compact_code_unwrap()); t.insert(<frame_system::Account<Runtime>>::hashed_key_for(alice()), new_account_info(100)); t.insert(<frame_system::Account<Runtime>>::hashed_key_for(bob()), new_account_info(10)); diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 4d342ceb460a3f3a5c12b502915f2b679c2b469c..8f68b1d3e2f094fd9b8ca7cf496db51c26188282 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } static_assertions = "1.1.0" log = { workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index ca7e14f6eb169f4edac300272c2455e5fff0975f..a9606ac0bb759dbc468db1c8c238b8ea35b0b6a1 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1303,6 +1303,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = ConstU32<{ 64 * 1024 }>; type MaxStale = ConstU32<128>; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); } parameter_types! { diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 31f8689d46ca30546482d438938c09457a433bf5..fa3f90193ba5d11325bfb068fa69780495ec8c90 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } fs_extra = "1" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } tempfile = "3.1.0" frame-system = { path = "../../../frame/system" } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index df302a6453b9ffbef603afc36fc860a1535a7ca2..e5c2563905e9ea59901db1564f2b0f34f6bd4aec 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -84,7 +84,7 @@ impl BenchPair { /// Drop system cache. /// -/// Will panic if cache drop is impossbile. +/// Will panic if cache drop is impossible.
pub fn drop_system_cache() { #[cfg(target_os = "windows")] { @@ -173,7 +173,7 @@ impl Clone for BenchDb { // We clear system cache after db clone but before any warmups. // This populates system cache with some data unrelated to actual - // data we will be quering further under benchmark (like what + // data we will be querying further under benchmark (like what // would have happened in real system that queries random entries // from database). drop_system_cache(); @@ -443,7 +443,7 @@ impl BenchDb { BlockContentIterator::new(content, &self.keyring, client) } - /// Get cliet for this database operations. + /// Get client for this database operations. pub fn client(&mut self) -> Client { let (client, _backend, _task_executor) = Self::bench_client(self.database_type, self.directory_guard.path(), &self.keyring); diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 8c78030c885484b7d1f9ba3213dbfa76faa36c52..dbd0437921ff3b0dd83612cfae1457e729daf37e 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -185,7 +185,7 @@ pub struct ConvertToRawCmd { /// Verifies the provided input chain spec. /// /// Silently checks if given input chain spec can be converted to raw. It allows to check if all -/// RuntimeGenesisConfig fiels are properly initialized and if the json does not contain invalid +/// RuntimeGenesisConfig fields are properly initialized and if the json does not contain invalid /// fields. #[derive(Parser, Debug, Clone)] pub struct VerifyCmd { diff --git a/substrate/bin/utils/subkey/README.md b/substrate/bin/utils/subkey/README.md index a5f27cfd3707d6278ac9f78b727021a305a18457..fc1053e232d70d746cf315d203a51dfd09e8447c 100644 --- a/substrate/bin/utils/subkey/README.md +++ b/substrate/bin/utils/subkey/README.md @@ -77,7 +77,7 @@ below can be derived from those secrets. The output above also show the **public key** and the **Account ID**. Those are the independent from the network where you will use the key. -The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public keys of an account for +The **SS58 address** (or **Public Address**) of a new account is a representation of the public keys of an account for a given network (for instance Kusama or Polkadot). You can read more about the [SS58 format in the Substrate Docs](https://docs.substrate.io/reference/address-formats/) @@ -143,7 +143,7 @@ Secret phrase `soup lyrics media market way crouch elevator put moon useful ques SS58 Address: 5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC ``` -Using the `inspect` command (see more details below), we see that knowning only the **secret seed** is no longer +Using the `inspect` command (see more details below), we see that knowing only the **secret seed** is no longer sufficient to recover the account: ```bash diff --git a/substrate/bin/utils/subkey/src/lib.rs b/substrate/bin/utils/subkey/src/lib.rs index f3023acde4047a715c0756f26ef2e3944c8206da..33f28ef46a5e7805b2c00b047e413f049169f6ef 100644 --- a/substrate/bin/utils/subkey/src/lib.rs +++ b/substrate/bin/utils/subkey/src/lib.rs @@ -94,10 +94,10 @@ //! seed** (also called **Private Key**). Those 2 secrets are the pieces of information you MUST //! keep safe and secret. All the other information below can be derived from those secrets. //! -//! The output above also show the **public key** and the **Account ID**. Those are the independant +//! 
The output above also shows the **public key** and the **Account ID**. Those are independent //! from the network where you will use the key. //! -//! The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public +//! The **SS58 address** (or **Public Address**) of a new account is a representation of the public //! keys of an account for a given network (for instance Kusama or Polkadot). //! //! You can read more about the [SS58 format in the Substrate Docs](https://docs.substrate.io/reference/address-formats/) and see the list of reserved prefixes in the [SS58 Registry](https://github.com/paritytech/ss58-registry). @@ -110,7 +110,7 @@ //! //! ### Json output //! -//! `subkey` can calso generate the output as *json*. This is useful for automation. +//! `subkey` can also generate the output as *json*. This is useful for automation. //! //! command: //! @@ -163,7 +163,7 @@ //! SS58 Address: 5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC //! ``` //! -//! Using the `inspect` command (see more details below), we see that knowning only the **secret +//! Using the `inspect` command (see more details below), we see that knowing only the **secret //! seed** is no longer sufficient to recover the account: //! //! ```bash @@ -184,7 +184,7 @@ //! //! ### Inspecting a key //! -//! If you have *some data* about a key, `subkey inpsect` will help you discover more information +//! If you have *some data* about a key, `subkey inspect` will help you discover more information //! about it. //! //! If you have **secrets** that you would like to verify for instance, you can use: diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index cd7b613e277f3952304d70c3bf63c36d09432bc2..fb650c5b532f2cbf841dc65e5f7f1a3c35dae733 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } fnv = "1.0.6" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index 26580064b3c453fb3a6ef77ff8d45413877f1d8d..dbd9ba0131a6825e708459ee86c702ea89974a57 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -21,7 +21,7 @@ prost-build = "0.11" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" ip_network = "0.4.1" libp2p = { version = "0.51.4", features = ["ed25519", "kad"] } @@ -43,7 +43,7 @@ sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } sp-keystore = { path = "../../primitives/keystore" } sp-runtime = { path = "../../primitives/runtime" } -async-trait = "0.1.74" +async-trait = "0.1.79" multihash-codetable = { version = "0.1.1", features = [ "digest", "serde", diff --git a/substrate/client/authority-discovery/src/interval.rs b/substrate/client/authority-discovery/src/interval.rs index 23c7ce266e3bf8c13485f78dac6d4bb848ef4612..0eee0d159cd86bc781aeed0f3c68f28a75c02f46 100644 --- a/substrate/client/authority-discovery/src/interval.rs +++ b/substrate/client/authority-discovery/src/interval.rs @@ -28,6 +28,7 @@ use std::{ /// /// Doubles
interval duration on each tick until the configured maximum is reached. pub struct ExpIncInterval { + start: Duration, max: Duration, next: Duration, delay: Delay, @@ -37,14 +38,29 @@ impl ExpIncInterval { /// Create a new [`ExpIncInterval`]. pub fn new(start: Duration, max: Duration) -> Self { let delay = Delay::new(start); - Self { max, next: start * 2, delay } + Self { start, max, next: start * 2, delay } } - /// Fast forward the exponentially increasing interval to the configured maximum. + /// Fast forward the exponentially increasing interval to the configured maximum, if not already + /// set. pub fn set_to_max(&mut self) { + if self.next == self.max { + return; + } + self.next = self.max; self.delay = Delay::new(self.next); } + + /// Rewind the exponentially increasing interval to the configured start, if not already set. + pub fn set_to_start(&mut self) { + if self.next == self.start * 2 { + return; + } + + self.next = self.start * 2; + self.delay = Delay::new(self.start); + } } impl Stream for ExpIncInterval { diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index b77f0241ec2faf4d0466103b2bd07226715b334c..546f8cdbffdc807c9efabaa4417766cd264b5da7 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -129,6 +129,9 @@ pub struct Worker<Client, Network, Block, DhtEventStream> { /// List of keys onto which addresses have been published at the latest publication. /// Used to check whether they have changed. latest_published_keys: HashSet<AuthorityId>, + /// List of the kademlia keys that have been published at the latest publication. + /// Used to associate DHT events with our published records. + latest_published_kad_keys: HashSet<KademliaKey>, /// Same value as in the configuration. publish_non_global_ips: bool, @@ -265,6 +268,7 @@ where publish_interval, publish_if_changed_interval, latest_published_keys: HashSet::new(), + latest_published_kad_keys: HashSet::new(), publish_non_global_ips: config.publish_non_global_ips, public_addresses, strict_record_validation: config.strict_record_validation, @@ -338,6 +342,7 @@ where } fn addresses_to_publish(&self) -> impl Iterator<Item = Multiaddr> { + let local_peer_id = self.network.local_peer_id(); let publish_non_global_ips = self.publish_non_global_ips; let addresses = self .public_addresses .clone() .into_iter() .chain(self.network.external_addresses().into_iter().filter_map(|mut address| { // Make sure the reported external address does not contain `/p2p/...` protocol. - if let Some(multiaddr::Protocol::P2p(_)) = address.iter().last() { + if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { + if peer_id != *local_peer_id.as_ref() { + error!( + target: LOG_TARGET, + "Network returned external address '{address}' with peer id \ + not matching the local peer id '{local_peer_id}'.", + ); + debug_assert!(false); + } address.pop(); } @@ -371,15 +384,16 @@ where }) .collect::<Vec<_>>(); - let peer_id = self.network.local_peer_id(); debug!( target: LOG_TARGET, - "Authority DHT record peer_id='{peer_id}' addresses='{addresses:?}'", + "Authority DHT record peer_id='{local_peer_id}' addresses='{addresses:?}'", ); - // The address must include the peer id. - let peer_id: Multihash = peer_id.into(); - addresses.into_iter().map(move |a| a.with(multiaddr::Protocol::P2p(peer_id))) + // The address must include the local peer id.
+ let local_peer_id: Multihash = local_peer_id.into(); + addresses + .into_iter() + .map(move |a| a.with(multiaddr::Protocol::P2p(local_peer_id))) } /// Publish own public addresses. @@ -397,8 +411,17 @@ where self.client.as_ref(), ).await?.into_iter().collect::<HashSet<_>>(); - if only_if_changed && keys == self.latest_published_keys { - return Ok(()) + if only_if_changed { + // If the authority keys did not change and the `publish_if_changed_interval` was + // triggered then do nothing. + if keys == self.latest_published_keys { + return Ok(()) + } + + // We have detected a change in the authority keys, reset the timers to + // publish and gather data faster. + self.publish_interval.set_to_start(); + self.query_interval.set_to_start(); } let addresses = serialize_addresses(self.addresses_to_publish()); @@ -422,6 +445,8 @@ where keys_vec, )?; + self.latest_published_kad_keys = kv_pairs.iter().map(|(k, _)| k.clone()).collect(); + for (key, value) in kv_pairs.into_iter() { self.network.put_value(key, value); } @@ -523,6 +548,10 @@ where } }, DhtEvent::ValuePut(hash) => { + if !self.latest_published_kad_keys.contains(&hash) { + return; + } + // Fast forward the exponentially increasing interval to the configured maximum. In // case this was the first successful address publishing there is no need for a // timely retry. @@ -535,6 +564,11 @@ where debug!(target: LOG_TARGET, "Successfully put hash '{:?}' on Dht.", hash) }, DhtEvent::ValuePutFailed(hash) => { + if !self.latest_published_kad_keys.contains(&hash) { + // Not a value we have published or received multiple times. + return; + } + if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); } diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index c29120881940c1edac54b9e3ffc986f817e487ba..6c684d88e5027b97c08fc29280e9686539440441 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -716,12 +716,16 @@ fn addresses_to_publish_adds_p2p() { #[test] fn addresses_to_publish_respects_existing_p2p_protocol() { let (_dht_event_tx, dht_event_rx) = channel(1000); + let identity = Keypair::generate_ed25519(); + let peer_id = identity.public().to_peer_id(); + let external_address = "/ip6/2001:db8::/tcp/30333" + .parse::<Multiaddr>() + .unwrap() + .with(multiaddr::Protocol::P2p(peer_id.into())); let network: Arc<TestNetwork> = Arc::new(TestNetwork { - external_addresses: vec![ - "/ip6/2001:db8::/tcp/30333/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC" - .parse() - .unwrap(), - ], + peer_id, + identity, + external_addresses: vec![external_address], ..Default::default() }); diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index 51a06464d0d6d5df0ccf960509f4df46d8b12207..4890b66c9b2f91ef13ea74fa9f66c797d0beb32c 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index
932287ac86577b0e1a9b2c057320aeda9d12319a..1519c76c42c0efab1fa59c7e53bed41c5e104f9b 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -180,7 +180,7 @@ impl ProposerFactory { /// The soft deadline indicates where we should stop attempting to add transactions /// to the block, which exhaust resources. After soft deadline is reached, /// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS` - /// transactions which exhaust resrouces, we will conclude that the block is full. + /// transactions which exhaust resources, we will conclude that the block is full. /// /// Setting the value too low will significantly limit the amount of transactions /// we try in case they exhaust resources. Setting the value too high can diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index f2138c07d71ad4c524e12d33ffb73d453d707fbb..f569b5f14a662b5f89e9c79f1620fb331e9d2500 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -34,7 +34,7 @@ sp-runtime = { path = "../../primitives/runtime" } sp-state-machine = { path = "../../primitives/state-machine" } log = { workspace = true } array-bytes = { version = "6.1" } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] substrate-test-runtime = { path = "../../test-utils/runtime" } diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index 78e81e10d2b613d83c98e8b689cba2fd1356287c..4ec8527de264cc8df49ce67ff534d6cedef8e641 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -52,7 +52,7 @@ enum GenesisSource<G> { File(PathBuf), Binary(Cow<'static, [u8]>), /// factory function + code -//Factory and G type parameter shall be removed togheter with `ChainSpec::from_genesis` +//Factory and G type parameter shall be removed together with `ChainSpec::from_genesis` Factory(Arc<dyn Fn() -> G + Send + Sync>, Vec<u8>), Storage(Storage), /// build action + code @@ -264,7 +264,7 @@ struct RuntimeInnerWrapper { enum Genesis<G> { /// (Deprecated) Contains the JSON representation of G (the native type representing the /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) - /// without the runtime code. It is required to deserialize the legacy chainspecs genereted + /// without the runtime code. It is required to deserialize the legacy chainspecs generated /// with `ChainsSpec::from_genesis` method. Runtime(G), /// (Deprecated) Contains the JSON representation of G (the native type representing the @@ -276,13 +276,13 @@ enum Genesis<G> { Raw(RawGenesis), /// State root hash of the genesis storage. StateRootHash(StorageData), - /// Represents the runtime genesis config in JSON format toghether with runtime code. + /// Represents the runtime genesis config in JSON format together with runtime code. RuntimeGenesis(RuntimeGenesisInner), } /// A configuration of a client. Does not include runtime storage initialization. /// Note: `genesis` field is ignored due to way how the chain specification is serialized into -/// JSON file. Refer to [`ChainSpecJsonContainer`], which flattens [`ClientSpec`] and denies uknown +/// JSON file. Refer to [`ChainSpecJsonContainer`], which flattens [`ClientSpec`] and denies unknown /// fields.
#[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "camelCase")] @@ -508,7 +508,7 @@ impl ChainSpec { self.client_spec.fork_id.as_deref() } - /// Additional loosly-typed properties of the chain. + /// Additional loosely-typed properties of the chain. /// /// Returns an empty JSON object if 'properties' not defined in config pub fn properties(&self) -> Properties { diff --git a/substrate/client/chain-spec/src/extension.rs b/substrate/client/chain-spec/src/extension.rs index f2939741535f730787846d2a641a2721f5cec9e5..d1f03539349686cf22b00b362907eb0c3f67f406 100644 --- a/substrate/client/chain-spec/src/extension.rs +++ b/substrate/client/chain-spec/src/extension.rs @@ -284,7 +284,7 @@ where } } -/// A subset of the `Extension` trait that only allows for quering extensions. +/// A subset of the `Extension` trait that only allows for querying extensions. pub trait GetExtension { /// Get an extension of specific type. fn get_any(&self, t: TypeId) -> &dyn Any; diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index c8b54f66be6f52ffb6900fafd2e8ec1652030976..61f065e213cb1aca50e1b34d84a855cec0ee33ca 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -33,7 +33,7 @@ use std::borrow::Cow; /// A utility that facilitates calling the GenesisBuilder API from the runtime wasm code blob. /// /// `EHF` type allows to specify the extended host function required for building runtime's genesis -/// config. The type will be compbined with default `sp_io::SubstrateHostFunctions`. +/// config. The type will be combined with default `sp_io::SubstrateHostFunctions`. pub struct GenesisConfigBuilderRuntimeCaller<'a, EHF = ()> where EHF: HostFunctions, diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index eab5f789f29a0589d68c3c0aa86952c75f112afa..e8b87a6040422aab2d6b5ad3e2a90c7b386aa318 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -393,7 +393,7 @@ pub trait ChainSpec: BuildStorage + Send + Sync { fn protocol_id(&self) -> Option<&str>; /// Optional network fork identifier. `None` by default. fn fork_id(&self) -> Option<&str>; - /// Additional loosly-typed properties of the chain. + /// Additional loosely-typed properties of the chain. 
/// /// Returns an empty JSON object if 'properties' not defined in config fn properties(&self) -> Properties; diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 47b29555e6603e9e72f8dca634f7beb20069b5e7..805d3ee117f4ac19b636550bea6a0845978defee 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = "6.1" chrono = "0.4.31" clap = { version = "4.5.3", features = ["derive", "string", "wrap_help"] } fdlimit = "0.3.0" -futures = "0.3.21" +futures = "0.3.30" itertools = "0.10.3" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = { workspace = true, default-features = true } diff --git a/substrate/client/cli/src/commands/vanity.rs b/substrate/client/cli/src/commands/vanity.rs index ce75161329893d281868c659bbc6656aa9ece194..330a59493efc9536cb1d78f2d97b40a11c94d4b0 100644 --- a/substrate/client/cli/src/commands/vanity.rs +++ b/substrate/client/cli/src/commands/vanity.rs @@ -51,7 +51,7 @@ pub struct VanityCmd { impl VanityCmd { /// Run the command pub fn run(&self) -> error::Result<()> { - let formated_seed = with_crypto_scheme!( + let formatted_seed = with_crypto_scheme!( self.crypto_scheme.scheme, generate_key( &self.pattern, @@ -62,7 +62,7 @@ impl VanityCmd { with_crypto_scheme!( self.crypto_scheme.scheme, print_from_uri( - &formated_seed, + &formatted_seed, None, self.network_scheme.network, self.output_scheme.output_type, diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs index 12f19df2a68549be318bedf1cc86875a292ee86b..94efb4280912041dcea512096226824012114b9b 100644 --- a/substrate/client/cli/src/params/network_params.rs +++ b/substrate/client/cli/src/params/network_params.rs @@ -310,7 +310,7 @@ mod tests { } #[test] - fn sync_ingores_case() { + fn sync_ignores_case() { let params = Cli::try_parse_from(["", "--sync", "wArP"]).expect("Parses network params"); assert_eq!(SyncMode::Warp, params.network_params.sync); diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index 213f75974da0c99cc5c39248a8a789c4989d8047..64e2d16cd913aaea5d2688087e90088e5ff46ab8 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } thiserror = { workspace = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index 1be7be8eeeaa60a75ae8b6ef95fd8cbbd5b3998e..e220aaac508df50c1c6d440cfe50ee3b4015d64a 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -82,7 +82,7 @@ pub enum CompatibilityMode { None, /// Call `initialize_block` before doing any runtime calls. /// - /// Previously the node would execute `initialize_block` before fetchting the authorities + /// Previously the node would execute `initialize_block` before fetching the authorities /// from the runtime. 
This behaviour changed in: /// /// By calling `initialize_block` before fetching the authorities, on a block that diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index c98fb7112b7c25bd39d980cab20c188f30b42eff..b001e3d117aa94c86e76484336b434f840ec3e69 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -17,9 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } num-bigint = "0.4.3" num-rational = "0.4.1" @@ -54,4 +54,4 @@ sc-network-test = { path = "../../network/test" } sp-timestamp = { path = "../../../primitives/timestamp" } sp-tracing = { path = "../../../primitives/tracing" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -tokio = "1.22.0" +tokio = "1.37" diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index 043b566673e7bd5aa581e3043dfa1280aa04d53f..b2661bbde27e4a9d72d0140e241c1da1bcc0f2b2 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } -futures = "0.3.21" +futures = "0.3.30" serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } sc-consensus-babe = { path = ".." } @@ -34,7 +34,7 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = { workspace = true, default-features = true } -tokio = "1.22.0" +tokio = "1.37" sc-consensus = { path = "../../common" } sc-keystore = { path = "../../../keystore" } sc-transaction-pool-api = { path = "../../../transaction-pool/api" } diff --git a/substrate/client/consensus/babe/src/authorship.rs b/substrate/client/consensus/babe/src/authorship.rs index 11f5233abc6b384ef034acaa1f26a1833aee124a..57ee706a04f6bfc80ff29b6e0bbf91a4874991b1 100644 --- a/substrate/client/consensus/babe/src/authorship.rs +++ b/substrate/client/consensus/babe/src/authorship.rs @@ -59,7 +59,7 @@ pub(super) fn calculate_primary_threshold( assert!(theta > 0.0, "authority with weight 0."); // NOTE: in the equation `p = 1 - (1 - c)^theta` the value of `p` is always - // capped by `c`. For all pratical purposes `c` should always be set to a + // capped by `c`. For all practical purposes `c` should always be set to a // value < 0.5, as such in the computations below we should never be near // edge cases like `0.999999`. diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index ccf72939631accae68c9e473cf30461c2fca65dc..d10bdd8c7e4c4d108b78ef3286f475ba85e16a8a 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1418,7 +1418,7 @@ where // Skip babe logic if block already in chain or importing blocks during initial sync, // otherwise the check for epoch changes will error because trying to re-import an - // epoch change or because of missing epoch data in the tree, respectivelly. + // epoch change or because of missing epoch data in the tree, respectively. 
if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || block_status == BlockStatus::InChain { diff --git a/substrate/client/consensus/babe/src/migration.rs b/substrate/client/consensus/babe/src/migration.rs index bec2d0a61f4b6d4f55236f69042bd764629b5f92..5f1ece3ec0e208306a935f269b47667cdb8b4863 100644 --- a/substrate/client/consensus/babe/src/migration.rs +++ b/substrate/client/consensus/babe/src/migration.rs @@ -64,7 +64,7 @@ impl EpochT for EpochV0 { // Implement From for Epoch impl EpochV0 { - /// Migrate the sturct to current epoch version. + /// Migrate the struct to current epoch version. pub fn migrate(self, config: &BabeConfiguration) -> Epoch { sp_consensus_babe::Epoch { epoch_index: self.epoch_index, diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index 8552a49002239aaeaf232c0aebf4180cec207337..c1d57baa394a379226a6ba8625143328f08a0235 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -14,10 +14,10 @@ workspace = true [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fnv = "1.0.6" -futures = "0.3" +futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" thiserror = { workspace = true } @@ -40,7 +40,7 @@ sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } sp-keystore = { path = "../../../primitives/keystore" } sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-runtime = { path = "../../../primitives/runtime" } -tokio = "1.22.0" +tokio = "1.37" [dev-dependencies] diff --git a/substrate/client/consensus/beefy/README.md b/substrate/client/consensus/beefy/README.md index 13f88303a972ac35f2bbcb11a443619b603149a7..a7956cfcd42efab055ea7cc5d0efc14f54ea20ba 100644 --- a/substrate/client/consensus/beefy/README.md +++ b/substrate/client/consensus/beefy/README.md @@ -297,7 +297,7 @@ periodically on the global topic. Let's now dive into description of the message - Justification is considered worthwhile to gossip when: - It is for a recent (implementation specific) round or the latest mandatory round. - All signatures are valid and there is at least `2/3rd + 1` of them. - - Signatorees are part of the current validator set. + - Signatories are part of the current validator set. - Mandatory justifications should be announced periodically. 
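To make the three gossip conditions above concrete, here is a hedged pseudo-Rust sketch of the worthiness check. The recency window and flat parameters are simplified stand-ins; the actual logic lives in the BEEFY gossip validator.

```rust
/// Simplified sketch: is a justification for `round` worth gossiping?
/// `valid_sigs` counts signatures that verified and whose signers are in the
/// current validator set of size `validators`.
fn worth_gossiping(round: u64, best_round: u64, mandatory_round: u64, valid_sigs: usize, validators: usize) -> bool {
    // "Recent" is implementation specific; a fixed window stands in here.
    let is_recent = round + 8 >= best_round;
    // At least 2/3 + 1 of the validator set must have signed.
    let enough_sigs = 3 * valid_sigs > 2 * validators;
    (is_recent || round == mandatory_round) && enough_sigs
}
```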
## Misbehavior diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index bb2ae4a08966707fd93afd48b625ccc4f760762b..e46fc4f4410a461f73f5e5fe791bd9b08fd98a99 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs index 6fda63688e6952839c18e1bbc9ff50ff0c5f7c21..09c540e3b8a8459826e7e338c6dc0ee83aa1823a 100644 --- a/substrate/client/consensus/beefy/src/communication/mod.rs +++ b/substrate/client/consensus/beefy/src/communication/mod.rs @@ -99,7 +99,7 @@ mod cost { // On-demand request was refused by peer. pub(super) const REFUSAL_RESPONSE: Rep = Rep::new(-100, "BEEFY: Proof request refused"); // On-demand request for a proof that can't be found in the backend. - pub(super) const UNKOWN_PROOF_REQUEST: Rep = Rep::new(-150, "BEEFY: Unknown proof request"); + pub(super) const UNKNOWN_PROOF_REQUEST: Rep = Rep::new(-150, "BEEFY: Unknown proof request"); } // benefit scalars for reporting peers. diff --git a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs index d856e9748a101b4ad71a39957579c41c0c75aaf4..ce184769fa7748368acb25db3228e4547ade4a5b 100644 --- a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -170,7 +170,7 @@ where .flatten() .and_then(|hash| self.client.justifications(hash).ok().flatten()) .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) - .ok_or_else(|| reputation_changes.push(cost::UNKOWN_PROOF_REQUEST)); + .ok_or_else(|| reputation_changes.push(cost::UNKNOWN_PROOF_REQUEST)); request .pending_response .send(netconfig::OutgoingResponse { diff --git a/substrate/client/consensus/beefy/src/keystore.rs b/substrate/client/consensus/beefy/src/keystore.rs index 2ddc938fbc6ce33aa74c62d051cf77a519c634ee..9582c2661c30b431a5934e2bcc592b4a56faf3b3 100644 --- a/substrate/client/consensus/beefy/src/keystore.rs +++ b/substrate/client/consensus/beefy/src/keystore.rs @@ -404,7 +404,7 @@ pub mod tests { let store: BeefyKeystore = Some(store).into(); - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig1 = store.sign(&alice, msg).unwrap(); let sig2 = Keyring::::Alice.sign(msg); @@ -440,7 +440,7 @@ pub mod tests { let alice = Keyring::Alice.public(); - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig = store.sign(&alice, msg).err().unwrap(); let err = Error::Signature(expected_error_message.to_string()); @@ -463,7 +463,7 @@ pub mod tests { let store: BeefyKeystore = None.into(); let alice = Keyring::Alice.public(); - let msg = b"are you involved or commited"; + let msg = b"are you involved or committed"; let sig = store.sign(&alice, msg).err().unwrap(); let err = Error::Keystore("no Keystore".to_string()); @@ -487,7 +487,7 @@ pub mod 
tests { let alice = Keyring::Alice.public(); // `msg` and `sig` match - let msg = b"are you involved or commited?"; + let msg = b"are you involved or committed?"; let sig = store.sign(&alice, msg).unwrap(); assert!(BeefyKeystore::verify(&alice, &sig, msg)); diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 323af1bc8305c38bf7a34b4690b212954b7b0864..714a0fb7c8856f5d98398c45f1e860c211d0e52f 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -222,6 +222,8 @@ pub struct BeefyParams { pub links: BeefyVoterLinks, /// Handler for incoming BEEFY justifications requests from a remote peer. pub on_demand_justifications_handler: BeefyJustifsRequestHandler, + /// Whether running under "Authority" role. + pub is_authority: bool, } /// Helper object holding BEEFY worker communication/gossip components. /// @@ -270,6 +272,7 @@ where min_block_delta: u32, gossip_validator: Arc>, finality_notifications: &mut Fuse>, + is_authority: bool, ) -> Result { // Wait for BEEFY pallet to be active before starting voter. let (beefy_genesis, best_grandpa) = @@ -283,6 +286,7 @@ where runtime.clone(), &key_store, &metrics, + is_authority, ) .await?; // Update the gossip validator with the right starting round and set id. @@ -301,6 +305,7 @@ where comms: BeefyComms, links: BeefyVoterLinks, pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + is_authority: bool, ) -> BeefyWorker { BeefyWorker { backend: self.backend, @@ -313,6 +318,7 @@ where comms, links, pending_justifications, + is_authority, } } @@ -423,6 +429,7 @@ where runtime: Arc, key_store: &BeefyKeystore, metrics: &Option, + is_authority: bool, ) -> Result, Error> { // Initialize voter state from AUX DB if compatible. if let Some(mut state) = crate::aux_schema::load_persistent(backend.as_ref())? 
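The hunks above and below thread a new `is_authority` flag from `BeefyParams` through `BeefyWorkerBuilder` and into the voter state. Its purpose shows up in the `worker.rs` hunk further down: instead of intersecting the keystore with the active validator set (the removed `verify_validator_set`), the worker now only warns when an authority node has no BEEFY key at all. A minimal, self-contained sketch of that gating logic; `warn_if_no_beefy_key` and the plain `Result` are stand-ins, not the real `BeefyKeystore` API:

```rust
// Sketch only: mirrors the `is_authority && key_store.public_keys().map_or(false, |k| k.is_empty())`
// condition added in this diff, with a plain Result standing in for the keystore call.
fn warn_if_no_beefy_key(is_authority: bool, public_keys: Result<Vec<String>, ()>) -> bool {
    // Note `map_or(false, ..)`: a keystore *error* does not trigger the warning,
    // only a successfully returned but empty key list does.
    let missing = is_authority && public_keys.map_or(false, |k| k.is_empty());
    if missing {
        eprintln!("🥩 no BEEFY authority key found in store, you must generate valid session keys");
    }
    missing
}

fn main() {
    assert!(warn_if_no_beefy_key(true, Ok(vec![]))); // authority without keys: warns
    assert!(!warn_if_no_beefy_key(true, Ok(vec!["k".into()]))); // key present: silent
    assert!(!warn_if_no_beefy_key(false, Ok(vec![]))); // full node: silent
    assert!(!warn_if_no_beefy_key(true, Err(()))); // keystore error: silent here
}
```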
@@ -455,7 +462,13 @@ where "🥩 Handling missed BEEFY session after node restart: {:?}.", new_session_start ); - state.init_session_at(new_session_start, validator_set, key_store, metrics); + state.init_session_at( + new_session_start, + validator_set, + key_store, + metrics, + is_authority, + ); } return Ok(state) } @@ -491,6 +504,7 @@ pub async fn start_beefy_gadget( prometheus_registry, links, mut on_demand_justifications_handler, + is_authority, } = beefy_params; let BeefyNetworkParams { @@ -553,6 +567,7 @@ pub async fn start_beefy_gadget( min_block_delta, beefy_comms.gossip_validator.clone(), &mut finality_notifications, + is_authority, ).fuse() => { match builder_init_result { Ok(builder) => break builder, @@ -580,6 +595,7 @@ pub async fn start_beefy_gadget( beefy_comms, links.clone(), BTreeMap::new(), + is_authority, ); match futures::future::select( diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index d106c9dcd88165f929085972483e090dbf78c559..aecfec7b9ed14deb861ca3be34039fdf1aaa6002 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -379,6 +379,7 @@ async fn voter_init_setup( Arc::new(api.clone()), &key_store, &metrics, + true, ) .await } @@ -438,6 +439,7 @@ where min_block_delta, prometheus_registry: None, on_demand_justifications_handler: on_demand_justif_handler, + is_authority: true, }; let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index c8eb19621ba5a6e45ea2be93e1e2e830492a9b81..ac6b72d1ea40e9031709f96b66a706cd702255f6 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -33,7 +33,7 @@ use crate::{ }; use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; -use log::{debug, error, info, log_enabled, trace, warn}; +use log::{debug, error, info, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; use sc_utils::notification::NotificationReceiver; use sp_api::ProvideRuntimeApi; @@ -51,7 +51,7 @@ use sp_runtime::{ SaturatedConversion, }; use std::{ - collections::{BTreeMap, BTreeSet, VecDeque}, + collections::{BTreeMap, VecDeque}, fmt::Debug, sync::Arc, }; @@ -76,13 +76,13 @@ pub(crate) enum RoundAction { pub(crate) struct VoterOracle { /// Queue of known sessions. Keeps track of voting rounds (block numbers) within each session. /// - /// There are three voter states coresponding to three queue states: + /// There are three voter states corresponding to three queue states: /// 1. voter uninitialized: queue empty, /// 2. up-to-date - all mandatory blocks leading up to current GRANDPA finalized: queue has ONE /// element, the 'current session' where `mandatory_done == true`, /// 3. lagging behind GRANDPA: queue has [1, N] elements, where all `mandatory_done == false`. - /// In this state, everytime a session gets its mandatory block BEEFY finalized, it's popped - /// off the queue, eventually getting to state `2. up-to-date`. + /// In this state, every time a session gets its mandatory block BEEFY finalized, it's + /// popped off the queue, eventually getting to state `2. up-to-date`. sessions: VecDeque>, /// Min delta in block numbers between two blocks, BEEFY should vote on. 
min_block_delta: u32, @@ -332,6 +332,7 @@ impl PersistedState { validator_set: ValidatorSet, key_store: &BeefyKeystore, metrics: &Option, + is_authority: bool, ) { debug!(target: LOG_TARGET, "🥩 New active validator set: {:?}", validator_set); @@ -348,11 +349,16 @@ impl PersistedState { } } - if log_enabled!(target: LOG_TARGET, log::Level::Debug) { - // verify the new validator set - only do it if we're also logging the warning - if verify_validator_set::(&new_session_start, &validator_set, key_store).is_err() { - metric_inc!(metrics, beefy_no_authority_found_in_store); - } + // verify we have some BEEFY key available in keystore when role is authority. + if is_authority && key_store.public_keys().map_or(false, |k| k.is_empty()) { + error!( + target: LOG_TARGET, + "🥩 for session starting at block {:?} no BEEFY authority key found in store, \ + you must generate valid session keys \ + (https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#generating-the-session-keys)", + new_session_start, + ); + metric_inc!(metrics, beefy_no_authority_found_in_store); } let id = validator_set.id(); @@ -390,6 +396,8 @@ pub(crate) struct BeefyWorker { pub persisted_state: PersistedState, /// BEEFY voter metrics pub metrics: Option, + /// Node runs under "Authority" role. + pub is_authority: bool, } impl BeefyWorker @@ -425,6 +433,7 @@ where validator_set, &self.key_store, &self.metrics, + self.is_authority, ); } @@ -1040,33 +1049,6 @@ where } } -/// Verify `active` validator set for `block` against the key store -/// -/// We want to make sure that we have _at least one_ key in our keystore that -/// is part of the validator set, that's because if there are no local keys -/// then we can't perform our job as a validator. -/// -/// Note that for a non-authority node there will be no keystore, and we will -/// return an error and don't check. The error can usually be ignored. 
-fn verify_validator_set( - block: &NumberFor, - active: &ValidatorSet, - key_store: &BeefyKeystore, -) -> Result<(), Error> { - let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); - - let public_keys = key_store.public_keys()?; - let store: BTreeSet<&AuthorityId> = public_keys.iter().collect(); - - if store.intersection(&active).count() == 0 { - let msg = "no authority public key found in store".to_string(); - debug!(target: LOG_TARGET, "🥩 for block {:?} {}", block, msg); - Err(Error::Keystore(msg)) - } else { - Ok(()) - } -} - #[cfg(test)] pub(crate) mod tests { use super::*; @@ -1208,6 +1190,7 @@ pub(crate) mod tests { comms, pending_justifications: BTreeMap::new(), persisted_state, + is_authority: true, } } @@ -1471,32 +1454,6 @@ pub(crate) mod tests { assert_eq!(extracted, Some(validator_set)); } - #[tokio::test] - async fn keystore_vs_validator_set() { - let keys = &[Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1); - let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); - - // keystore doesn't contain other keys than validators' - assert_eq!(verify_validator_set::(&1, &validator_set, &worker.key_store), Ok(())); - - // unknown `Bob` key - let keys = &[Keyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let err_msg = "no authority public key found in store".to_string(); - let expected = Err(Error::Keystore(err_msg)); - assert_eq!(verify_validator_set::(&1, &validator_set, &worker.key_store), expected); - - // worker has no keystore - worker.key_store = None.into(); - let expected_err = Err(Error::Keystore("no Keystore".into())); - assert_eq!( - verify_validator_set::(&1, &validator_set, &worker.key_store), - expected_err - ); - } - #[tokio::test] async fn should_finalize_correctly() { let keys = [Keyring::Alice]; diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index 7f36dfc09ef8451927eb32ebd651258647abee3b..b2738a1d12d9735e21152e9129c9b33b907b6df9 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "Collection of common consensus specific imlementations for Substrate (client)" +description = "Collection of common consensus specific implementations for Substrate (client)" readme = "README.md" [lints] @@ -16,8 +16,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" -futures = { version = "0.3.21", features = ["thread-pool"] } +async-trait = "0.1.79" +futures = { version = "0.3.30", features = ["thread-pool"] } futures-timer = "3.0.1" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs index 39d5bf8ed35d17468a8d8819a423f22a0d80667c..062e244a912ef824b3a85574c5195889e2556a89 100644 --- a/substrate/client/consensus/common/src/import_queue.rs +++ b/substrate/client/consensus/common/src/import_queue.rs @@ -133,7 +133,7 @@ pub trait ImportQueue: Send { /// Start asynchronous runner for import queue. 
/// /// Takes an object implementing [`Link`] which allows the import queue to - /// influece the synchronization process. + /// influence the synchronization process. async fn run(self, link: Box>); } diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs index 1cc7ec26fd1930100ebe45f9c1e8ba643bd5be37..125d4f104c1918bf5a54e5a13f67b4603a3ae302 100644 --- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs +++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs @@ -187,7 +187,7 @@ impl ImportQueue for BasicQueue { /// Start asynchronous runner for import queue. /// /// Takes an object implementing [`Link`] which allows the import queue to - /// influece the synchronization process. + /// influence the synchronization process. async fn run(mut self, mut link: Box>) { loop { if let Err(_) = self.result_port.next_action(&mut *link).await { @@ -198,7 +198,7 @@ impl ImportQueue for BasicQueue { } } -/// Messages destinated to the background worker. +/// Messages destined for the background worker. mod worker_messages { use super::*; diff --git a/substrate/client/consensus/epochs/src/lib.rs b/substrate/client/consensus/epochs/src/lib.rs index 29bb18e147c2b8aef2a172130e753bf0d2f7193d..bbc143b7bd302aec2f45f868c48ab9f7dd7ff58f 100644 --- a/substrate/client/consensus/epochs/src/lib.rs +++ b/substrate/client/consensus/epochs/src/lib.rs @@ -326,7 +326,7 @@ impl AsRef for IncrementedEpoch { /// /// The first epoch, epoch_0, is special cased by saying that it starts at /// slot number of the first block in the chain. When bootstrapping a chain, -/// there can be multiple competing block #1s, so we have to ensure that the overlayed +/// there can be multiple competing block #1s, so we have to ensure that the overlaid /// DAG doesn't get confused. /// /// The first block of every epoch should be producing a descriptor for the next @@ -655,7 +655,7 @@ where /// Revert to a specified block given its `hash` and `number`. /// This removes all the epoch changes information that were announced by - /// all the given block descendents. + /// all the given block descendants.
pub fn revert>( &mut self, descendent_of_builder: D, diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 1ab953525220b82a500e5a23be0b94a673be51a2..797b4ea35b29872e201b8abd6ae5781526f72108 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.8.2" array-bytes = "6.1" -async-trait = "0.1.74" +async-trait = "0.1.79" dyn-clone = "1.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", features = ["derive"] } @@ -58,7 +58,7 @@ sp-runtime = { path = "../../../primitives/runtime" } assert_matches = "1.3.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } serde = { workspace = true, default-features = true } -tokio = "1.22.0" +tokio = "1.37" sc-network = { path = "../../network" } sc-network-test = { path = "../../network/test" } sp-keyring = { path = "../../../primitives/keyring" } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index f7e87415448e8c44952b957cf365fb36b089e4b0..0789a429ac416de70201e30e909d4f3dec7e4a66 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } -futures = "0.3.16" +futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", features = ["derive"] } diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs index 5c7e1276297a988b3109ae650e121331f57f930d..6e87d6bf9a28924811efab920b98da7c51480481 100644 --- a/substrate/client/consensus/grandpa/src/communication/mod.rs +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -222,13 +222,13 @@ pub(crate) struct NetworkBridge, S: Syncing> { neighbor_sender: periodic::NeighborPacketSender, /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // `NetworkBridge` is required to be clonable, thus one needs to be able to clone its // children, thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. neighbor_packet_worker: Arc>>, /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the /// gossip engine. - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // `NetworkBridge` is required to be clonable, thus one needs to be able to clone its // children, thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given // that it is just an `UnboundedReceiver`, one could also switch to a // multi-producer-*multi*-consumer channel implementation. 
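The `NetworkBridge` comments in the hunk above describe a standard workaround: the bridge must be clonable, but children such as the `NeighborPacketWorker` and the report-stream receiver are not, so they are wrapped in `Arc<Mutex<_>>` and every clone shares the single instance. A compilable toy version of the pattern, with illustrative types rather than the real `NetworkBridge`:

```rust
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex};

// The receiver itself is not `Clone`, but `Arc<Mutex<Receiver<_>>>` is,
// so the derived `Clone` impl works and all clones share one receiver.
#[derive(Clone)]
struct Bridge {
    report_stream: Arc<Mutex<Receiver<i32>>>,
}

fn main() {
    let (tx, rx) = channel();
    let bridge = Bridge { report_stream: Arc::new(Mutex::new(rx)) };
    let clone = bridge.clone();

    tx.send(42).unwrap();
    // Whichever handle locks the mutex first consumes the message.
    assert_eq!(clone.report_stream.lock().unwrap().recv().unwrap(), 42);
}
```

As the original comment notes, an `UnboundedReceiver` could alternatively be swapped for a multi-producer-multi-consumer channel, which would make the lock unnecessary.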
@@ -766,7 +766,7 @@ impl Sink> for OutgoingMessages { ) .ok_or_else(|| { Error::Signing(format!( - "Failed to sign GRANDPA vote for round {} targetting {:?}", + "Failed to sign GRANDPA vote for round {} targeting {:?}", self.round, target_hash )) })?; diff --git a/substrate/client/consensus/grandpa/src/communication/periodic.rs b/substrate/client/consensus/grandpa/src/communication/periodic.rs index daa752920287971ee986ae5c65fb995b52187495..9d0e76b7c80bd2911d49d14ddd3923f16dd6b211 100644 --- a/substrate/client/consensus/grandpa/src/communication/periodic.rs +++ b/substrate/client/consensus/grandpa/src/communication/periodic.rs @@ -106,7 +106,7 @@ impl Stream for NeighborPacketWorker { // Make sure the underlying task is scheduled for wake-up. // - // Note: In case poll_unpin is called after the resetted delay fires again, this + // Note: In case poll_unpin is called after the reset delay fires again, this // will drop one tick. Deemed as very unlikely and also not critical. while this.delay.poll_unpin(cx).is_ready() {} diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 7e42c2d45c733b8cbc32a599680202399fadaed6..14708cc89e890bd3fe7963589010896f78bb6ab1 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -1805,7 +1805,7 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ ); // best block is higher than finality target and it's on the same fork, - // the best block passed to the voting rule should not be overriden + // the best block passed to the voting rule should not be overridden select_chain.set_best_chain(client.expect_header(hashof10_a).unwrap()); select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); voting_rule.set_expected_best_block(hashof10_a); @@ -1940,7 +1940,7 @@ async fn justification_with_equivocation() { precommits.push(precommit); } - // we create an equivocation for the 67th validator targetting blocks #1 and #2. + // we create an equivocation for the 67th validator targeting blocks #1 and #2. // this should be accounted as "voting for all blocks" and therefore block #3 will // have 67/100 votes, reaching finality threshold. { diff --git a/substrate/client/consensus/grandpa/src/until_imported.rs b/substrate/client/consensus/grandpa/src/until_imported.rs index 14f32ecc883662d9d690b75611657cef0aa797e6..f3874086b58143d185f87ad9c6e0eb009acf16e9 100644 --- a/substrate/client/consensus/grandpa/src/until_imported.rs +++ b/substrate/client/consensus/grandpa/src/until_imported.rs @@ -1002,7 +1002,7 @@ mod tests { } #[test] - fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { + fn block_global_message_wait_completed_return_none_on_block_number_mismatch() { let msg_inner = test_catch_up(); let waiting_block_1 = diff --git a/substrate/client/consensus/grandpa/src/voting_rule.rs b/substrate/client/consensus/grandpa/src/voting_rule.rs index e09780739c7353418a81488a25060eba0231ebfc..c37596d20f68625b21f443e16db3c03fc08357da 100644 --- a/substrate/client/consensus/grandpa/src/voting_rule.rs +++ b/substrate/client/consensus/grandpa/src/voting_rule.rs @@ -196,7 +196,7 @@ where target_header = backend .header(target_hash) .ok()? 
- .expect("Header known to exist due to the existence of one of its descendents; qed"); + .expect("Header known to exist due to the existence of one of its descendants; qed"); } } diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index 29111712ec382ec6758fddcfa670d4edd2a9a7eb..7169a424c14a7726efa9531b80ef13f829bc9529 100644 --- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -249,7 +249,7 @@ impl> NetworkProvider: BlockNumberOps, { - /// Create a new istance for a given backend and authority set. + /// Create a new instance for a given backend and authority set. pub fn new( backend: Arc, authority_set: SharedAuthoritySet>, diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index ac32fed72289fd6a296de2720a75fb36c65ad1f8..1422d46105b22bd093de3260e3e13219aa2731a9 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } assert_matches = "1.3.0" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs index 26fa81459808c39635a1970b0fef7b36774803a9..bc56ce0227142fee2c001c39ce8d31cd9e6fb9b5 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs @@ -82,7 +82,7 @@ pub struct BabeVerifier { } impl BabeVerifier { - /// create a nrew verifier + /// create a new verifier pub fn new(epoch_changes: SharedEpochChanges, client: Arc) -> BabeVerifier { BabeVerifier { epoch_changes, client } } diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index 0791514035b43c83492175bcac67e2a6eb6aa9c6..ecfa29aa194d424d35eba858a0b829fb58a610e3 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml index 75f8b29a2fd755c18d68b817499e2dfb75ea232c..4ac6ce90713798d2cce4172f3544c6a116736a84 100644 --- a/substrate/client/consensus/slots/Cargo.toml +++ b/substrate/client/consensus/slots/Cargo.toml @@ -17,9 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } sc-client-api = { path = "../../api" } diff --git a/substrate/client/consensus/slots/src/lib.rs 
b/substrate/client/consensus/slots/src/lib.rs index 12636aae7a4412228ab1da5755c9f82669c469e5..d9d792005312503f48bb2d628235794952104ccd 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -555,7 +555,7 @@ impl SlotProportion { Self(inner.clamp(0.0, 1.0)) } - /// Returns the inner that is guaranted to be in the range `[0,1]`. + /// Returns the inner that is guaranteed to be in the range `[0,1]`. pub fn get(&self) -> f32 { self.0 } @@ -648,7 +648,7 @@ pub fn proposing_remaining_duration( } /// Calculate a slot duration lenience based on the number of missed slots from current -/// to parent. If the number of skipped slots is greated than 0 this method will apply +/// to parent. If the number of skipped slots is greater than 0 this method will apply /// an exponential backoff of at most `2^7 * slot_duration`, if no slots were skipped /// this method will return `None.` pub fn slot_lenience_exponential( @@ -680,7 +680,7 @@ pub fn slot_lenience_exponential( } /// Calculate a slot duration lenience based on the number of missed slots from current -/// to parent. If the number of skipped slots is greated than 0 this method will apply +/// to parent. If the number of skipped slots is greater than 0 this method will apply /// a linear backoff of at most `20 * slot_duration`, if no slots were skipped /// this method will return `None.` pub fn slot_lenience_linear( diff --git a/substrate/client/db/src/upgrade.rs b/substrate/client/db/src/upgrade.rs index f1e503867dfc3e4a9ad3ddd5135f5d426e9c221c..475220d9991931e5ed03f1be9ba812bb5a6b1ad4 100644 --- a/substrate/client/db/src/upgrade.rs +++ b/substrate/client/db/src/upgrade.rs @@ -79,7 +79,7 @@ impl fmt::Display for UpgradeError { write!(f, "Database version comes from future version of the client: {}", version) }, UpgradeError::DecodingJustificationBlock => { - write!(f, "Decodoning justification block failed") + write!(f, "Decoding justification block failed") }, UpgradeError::Io(err) => write!(f, "Io error: {}", err), } diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index abf9c4629cee47e31d18a18c827212b48fe31511..b532e0d4666273648a7cef073a11ab21eb3a2554 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -338,7 +338,7 @@ fn open_kvdb_rocksdb( db_config.memory_budget = memory_budget; let db = kvdb_rocksdb::Database::open(&db_config, path)?; - // write database version only after the database is succesfully opened + // write database version only after the database is successfully opened crate::upgrade::update_version(path)?; Ok(sp_database::as_database(db)) } diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index 7fad7e3a2a0ebaf3a8ecde19ff9517e47ff28e75..cb0befe98710f04ef857f6181cc34d2a7cfc5400 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -51,7 +51,7 @@ tracing-subscriber = "0.2.19" paste = "1.0" regex = "1.6.0" criterion = "0.4.0" -env_logger = "0.9" +env_logger = "0.11" num_cpus = "1.13.1" tempfile = "3.3.0" diff --git a/substrate/client/executor/common/src/error.rs b/substrate/client/executor/common/src/error.rs index b7c7e2b701984e37d43741ecb3cc2a1efdab9bdc..9d489eaae4200035b6e0ffc0b1bafb045b2ce2b7 100644 --- a/substrate/client/executor/common/src/error.rs +++ b/substrate/client/executor/common/src/error.rs @@ -145,7 +145,7 @@ pub enum WasmError { #[error("{0}")] Instantiation(String), - /// Other error happenend. 
+ /// Other error happened. #[error("Other error happened while constructing the runtime: {0}")] Other(String), } diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index 595cc503272ebea7d249262c66b206cab5697d36..286d134ecd1715cdd876e3412c37958acfc93b71 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -462,7 +462,7 @@ pub struct Semantics { pub struct Config { /// The WebAssembly standard requires all imports of an instantiated module to be resolved, /// otherwise, the instantiation fails. If this option is set to `true`, then this behavior is - /// overriden and imports that are requested by the module and not provided by the host + /// overridden and imports that are requested by the module and not provided by the host /// functions will be resolved using stubs. These stubs will trap upon a call. pub allow_missing_func_imports: bool, diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index 1c06da1e3c142f58d4fa3ec64cf87577190218f8..f86a4275769401712622112c6a2c587d11e1adc4 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -254,7 +254,7 @@ fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { /// A NaN with canonical payload bits. const CANONICAL_NAN_BITS: u32 = 0x7fc00000; - /// A NaN value with an abitrary payload. + /// A NaN value with an arbitrary payload. const ARBITRARY_NAN_BITS: u32 = 0x7f812345; // This test works like this: we essentially do @@ -272,7 +272,7 @@ fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { // However, with the `canonicalize_nans` option turned on above, we expect that the output will // be a canonical NaN. // - // We exterpolate the results of this tests so that we assume that all intermediate computations + // We extrapolate the results of this test so that we assume that all intermediate computations // that involve floats are sanitized and cannot produce a non-deterministic NaN.
let params = (u32::to_le_bytes(ARBITRARY_NAN_BITS), u32::to_le_bytes(1)).encode(); diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml index bd15e94ebafab26c89f76927d9d2f9c576ca1541..191ef5f19f8df65b3c817129fe34c4db7438826d 100644 --- a/substrate/client/informant/Cargo.toml +++ b/substrate/client/informant/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } sc-client-api = { path = "../api" } diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml index 60232bccb0e08d1fb65aeb6d94723c03fcf77fa1..46b7a1011c465f1aced062ba6978580a5f85c43c 100644 --- a/substrate/client/merkle-mountain-range/Cargo.toml +++ b/substrate/client/merkle-mountain-range/Cargo.toml @@ -15,7 +15,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3" +futures = "0.3.30" log = { workspace = true, default-features = true } sp-api = { path = "../../primitives/api" } sp-blockchain = { path = "../../primitives/blockchain" } @@ -32,4 +32,4 @@ parking_lot = "0.12.1" sc-block-builder = { path = "../block-builder" } sp-tracing = { path = "../../primitives/tracing" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -tokio = "1.17.0" +tokio = "1.37" diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 736184f4668c8c96c660502774da90aedd85b731..3beeae9f9b140549b09a7e05b5ffb5c76da40ebd 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -21,7 +21,7 @@ arrayvec = "0.7.2" blake2 = "0.10.4" bytes = "1" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -futures = "0.3.25" +futures = "0.3.30" futures-timer = "3.0.2" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index a14761c0d6e81e4eeda2e1e3d8dacd5fd7b99d4a..346e6bd6a5c6ff665ac36991c46b562a235de1cf 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.8.2" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.1" libp2p = "0.51.4" log = { workspace = true, default-features = true } @@ -31,8 +31,8 @@ sc-network-sync = { path = "../network/sync" } sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] -tokio = "1.22.0" -async-trait = "0.1.74" +tokio = "1.37" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } quickcheck = { version = "1.0.3", default-features = false } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index c6f17647166c4c406bd83003344177395599e67b..a891336d24172627fcacaa3efaa3f73f98b448a0 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1" +async-trait = "0.1.79" asynchronous-codec = "0.6" bytes = "1" codec = { package = 
"parity-scale-codec", version = "3.6.1", features = ["derive"] } either = "1.5.3" fnv = "1.0.6" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" ip_network = "0.4.1" libp2p = { version = "0.51.4", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"] } diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml index 7ef3ea212427848510e2e4b910efbe3d54e01a48..587e2e70867ba8f055bf0a3e267c48fcf4ea0cb9 100644 --- a/substrate/client/network/bitswap/Cargo.toml +++ b/substrate/client/network/bitswap/Cargo.toml @@ -21,7 +21,7 @@ prost-build = "0.11" [dependencies] async-channel = "1.8.0" cid = "0.9.0" -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } prost = "0.12" diff --git a/substrate/client/network/bitswap/src/lib.rs b/substrate/client/network/bitswap/src/lib.rs index 0586354d6a0dd4c576d4d326d04bacd69069bcb9..1ba95e30bad101e92adc2291b5d0928896f33269 100644 --- a/substrate/client/network/bitswap/src/lib.rs +++ b/substrate/client/network/bitswap/src/lib.rs @@ -301,7 +301,7 @@ mod tests { use substrate_test_runtime_client::{self, prelude::*, TestClientBuilder}; #[tokio::test] - async fn undecodeable_message() { + async fn undecodable_message() { let client = substrate_test_runtime_client::new(); let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml index 7a6d904b74b1820234a8fd5f169a4283e5abefc4..f9248b0bb51cac8e605141d791688f73a65f602f 100644 --- a/substrate/client/network/common/Cargo.toml +++ b/substrate/client/network/common/Cargo.toml @@ -19,12 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" bitflags = "1.3.2" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } sc-consensus = { path = "../../consensus/common" } sp-consensus = { path = "../../../primitives/consensus/common" } diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index c757f727fb71a7c2261090d741e286d7851d8239..2628fd07d3e4972de4e58135f23e4bc2e3cf008b 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -24,7 +24,7 @@ array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive", ] } -futures = "0.3.21" +futures = "0.3.30" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } prost = "0.12" diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index 9ad41e376e824835cdb0be883b3c0f344218ed11..b945d4bfc6043cccbeb41219d2f25edd33c5d510 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -2668,7 +2668,7 @@ mod tests { // // there is not straight-forward way of adding backoff to `PeerState::Disabled` // so manually adjust the value in order to progress on to the next stage. 
- // This modification together with `ConnectionClosed` will conver the peer + // This modification together with `ConnectionClosed` will convert the peer // state into `PeerState::Backoff`. if let Some(PeerState::Disabled { ref mut backoff_until, .. }) = notif.peers.get_mut(&(peer, set_id)) diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index 28662be29feede2ef67f150d6fa1b67ff313ae36..391252c3ffe71cdf3f7e9b5d1efc0ee5ad0ffcc6 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -211,7 +211,7 @@ enum State { /// consequently trying to open the various notifications substreams. /// /// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must - /// be emitted when transitionning to respectively [`State::Open`] or [`State::Closed`]. + /// be emitted when transitioning to respectively [`State::Open`] or [`State::Closed`]. Opening { /// Substream opened by the remote. If `Some`, has been accepted. in_substream: Option>, diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs index 62e6d88a3d5a6ae36b46728a37dc3ff5c31177db..dfb19daa28efee310d65275d57ac294543e69321 100644 --- a/substrate/client/network/src/protocol/notifications/service/mod.rs +++ b/substrate/client/network/src/protocol/notifications/service/mod.rs @@ -54,7 +54,7 @@ const COMMAND_QUEUE_SIZE: usize = 64; /// Type representing subscribers of a notification protocol. type Subscribers = Arc>>>; -/// Type represending a distributable message sink. +/// Type representing a distributable message sink. /// Detached message sink must carry the protocol name for registering metrics. /// /// See documentation for [`PeerContext`] for more details. @@ -175,11 +175,11 @@ pub enum NotificationCommand { /// and an additional, distributable `NotificationsSink` which the protocol may acquire /// if it wishes to send notifications through `NotificationsSink` directly. /// -/// The distributable `NoticationsSink` is wrapped in an `Arc>` to allow +/// The distributable `NotificationsSink` is wrapped in an `Arc>` to allow /// `NotificationsService` to swap the underlying sink in case it's replaced. #[derive(Debug, Clone)] struct PeerContext { - /// Sink for sending notificaitons. + /// Sink for sending notifications. sink: NotificationsSink, /// Distributable notification sink. 
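The hunks that follow replace the fixed "mpsc-notification-to-protocol" channel label with one derived from the protocol name, via the new `metric_label_for_protocol` helper shown below. A standalone re-implementation of that derivation (using `&str` in place of the real `ProtocolName`) makes the resulting label easy to check; note that the last two `/`-separated tokens are folded in reverse, so the version ends up before the name:

```rust
// Re-implementation for illustration; matches the helper added in the hunk below.
fn metric_label(protocol: &str) -> String {
    let keys = protocol.split('/').collect::<Vec<_>>();
    keys.iter()
        .rev()
        .take(2) // last two tokens: protocol name and version
        .fold("mpsc-notification-to-protocol".to_string(), |acc, val| {
            format!("{}-{}", acc, val)
        })
}

fn main() {
    // The version token comes first because the tokens are taken in reverse.
    assert_eq!(
        metric_label("/dot/transactions/1"),
        "mpsc-notification-to-protocol-1-transactions"
    );
}
```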
@@ -338,7 +338,8 @@ impl NotificationService for NotificationHandle { // Clone [`NotificationService`] fn clone(&mut self) -> Result, ()> { let mut subscribers = self.subscribers.lock(); - let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000); + + let (event_tx, event_rx) = tracing_unbounded(self.rx.name(), 100_000); subscribers.push(event_tx); Ok(Box::new(NotificationHandle { @@ -624,7 +625,9 @@ pub fn notification_service( protocol: ProtocolName, ) -> (ProtocolHandlePair, Box) { let (cmd_tx, cmd_rx) = mpsc::channel(COMMAND_QUEUE_SIZE); - let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000); + + let (event_tx, event_rx) = + tracing_unbounded(metric_label_for_protocol(&protocol).leak(), 100_000); let subscribers = Arc::new(Mutex::new(vec![event_tx])); ( @@ -632,3 +635,14 @@ pub fn notification_service( Box::new(NotificationHandle::new(protocol.clone(), cmd_tx, event_rx, subscribers)), ) } + +// Decorates the mpsc-notification-to-protocol metric with the name of the protocol, +// to be able to distinguish between different protocols in dashboards. +fn metric_label_for_protocol(protocol: &ProtocolName) -> String { + let protocol_name = protocol.to_string(); + let keys = protocol_name.split("/").collect::>(); + keys.iter() + .rev() + .take(2) // Last two tokens give the protocol name and version + .fold("mpsc-notification-to-protocol".into(), |acc, val| format!("{}-{}", acc, val)) +} diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs index 02ba9e1711c3856df095c8a31688e3388bf0bb00..238e0ccf566898d9c19431a0d00bc14284d449e4 100644 --- a/substrate/client/network/src/protocol/notifications/service/tests.rs +++ b/substrate/client/network/src/protocol/notifications/service/tests.rs @@ -437,7 +437,7 @@ async fn peer_disconnects_then_async_notification_is_sent() { notif.send_async_notification(&peer_id, vec![1, 3, 3, 7]).await { } else { - panic!("invalid state after calling `send_async_notificatio()` on closed connection") + panic!("invalid state after calling `send_async_notification()` on closed connection") } } diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs index 4e1c033f33b68e551fbd98e9c06edcbdd7173b5c..85209a888cd9d935ea6533b64c003e5ee407eeca 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -188,7 +188,7 @@ where } } -/// Yielded by the [`NotificationsIn`] after a successfuly upgrade. +/// Yielded by the [`NotificationsIn`] after a successful upgrade. pub struct NotificationsInOpen { /// Handshake sent by the remote. pub handshake: Vec, @@ -415,7 +415,7 @@ where } } -/// Yielded by the [`NotificationsOut`] after a successfuly upgrade. +/// Yielded by the [`NotificationsOut`] after a successful upgrade. pub struct NotificationsOutOpen { /// Handshake returned by the remote.
pub handshake: Vec, diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index 4c8f119baa203b3942f4156e2f92efb72007c77f..7f851fd8e9c99c10ff4b93e2f3ca6836ffc6de80 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -448,7 +448,7 @@ impl ProtocolController { self.peer_store.report_disconnect(peer_id); } - /// Ask `Peerset` if the peer has a reputation value not sufficent for connection with it. + /// Ask `Peerset` if the peer has a reputation value not sufficient for connection with it. fn is_banned(&self, peer_id: &PeerId) -> bool { self.peer_store.is_banned(peer_id) } @@ -2020,7 +2020,7 @@ mod tests { ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store)); assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected))); - // Initiate connectios + // Initiate connections controller.alloc_slots(); assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected))); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index b6efee5d9d36ae9deffe9ecd4951176c829de0bc..635cfc5d0d5ecf21268822e39a88961b85190a4e 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "6.1" async-channel = "1.8.0" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" libp2p = "0.51.4" log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index 32ba3b6356c0ceb0dcf38bda7d333ffddf67d238..6b46d67a3cae7a32bec276108ccef1782de0943a 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -21,9 +21,9 @@ prost-build = "0.11" [dependencies] array-bytes = "6.1" async-channel = "1.8.0" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" libp2p = "0.51.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 24640fdc45576191f30d190a39ee8186d27520c8..ff40ae95624e198c4eb5af64c5af85f9cd180c9a 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -110,7 +110,7 @@ const INACTIVITY_EVICT_THRESHOLD: Duration = Duration::from_secs(30); /// Parachain collator may incorrectly get evicted because it's waiting to receive a number of /// relaychain blocks before it can start creating parachain blocks. During this wait, /// `SyncingEngine` still counts it as active and as the peer is not sending blocks, it may get -/// evicted if a block is not received within the first 30 secons since the peer connected. +/// evicted if a block is not received within the first 30 seconds since the peer connected. /// /// To prevent this from happening, define a threshold for how long `SyncingEngine` should wait /// before it starts evicting peers. 
@@ -424,7 +424,7 @@ where .expect("Genesis block exists; qed"), ); - // Split warp sync params into warp sync config and a channel to retreive target block + // Split warp sync params into warp sync config and a channel to retrieve target block // header. let (warp_sync_config, warp_sync_target_block_header_rx) = warp_sync_params.map_or((None, None), |params| { @@ -1057,7 +1057,7 @@ where // still be under validation. If the peer has different genesis than the // local node the validation fails but the peer cannot be reported in // `validate_connection()` as that is also called by - // `ValiateInboundSubstream` which means that the peer is still being + // `ValidateInboundSubstream` which means that the peer is still being // validated and banning the peer when handling that event would // result in peer getting dropped twice. // diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index dabcf37ae6321a4501095a4ba3140c1ccf424b74..610fd7c65606b18d7eb9dc018636f1fac3ff35f5 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -185,7 +185,7 @@ where + Sync + 'static, { - /// Initialize a new syncing startegy. + /// Initialize a new syncing strategy. pub fn new( config: SyncingConfig, client: Arc, @@ -418,7 +418,7 @@ where self.state.is_some() || match self.chain_sync { Some(ref s) => s.status().state.is_major_syncing(), - None => unreachable!("At least one syncing startegy is active; qed"), + None => unreachable!("At least one syncing strategy is active; qed"), } } @@ -429,7 +429,7 @@ where /// Returns the current sync status. pub fn status(&self) -> SyncStatus { - // This function presumes that startegies are executed serially and must be refactored + // This function presumes that strategies are executed serially and must be refactored // once we have parallel strategies. if let Some(ref warp) = self.warp { warp.status() @@ -438,7 +438,7 @@ where } else if let Some(ref chain_sync) = self.chain_sync { chain_sync.status() } else { - unreachable!("At least one syncing startegy is always active; qed") + unreachable!("At least one syncing strategy is always active; qed") } } @@ -518,7 +518,7 @@ where /// Proceed with the next strategy if the active one finished. pub fn proceed_to_next(&mut self) -> Result<(), ClientError> { - // The strategies are switched as `WarpSync` -> `StateStartegy` -> `ChainSync`. + // The strategies are switched as `WarpSync` -> `StateStrategy` -> `ChainSync`. if let Some(ref mut warp) = self.warp { match warp.take_result() { Some(res) => { @@ -569,7 +569,7 @@ where }, } } else if let Some(state) = &self.state { - if state.is_succeded() { + if state.is_succeeded() { info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); } else { error!(target: LOG_TARGET, "State sync failed. Falling back to full sync."); diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index ad0c75363e78ac948b4e10352328c64cefb792a1..da04bbbeccc889250f57818e9a14e4f25400093d 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -117,7 +117,7 @@ mod rep { /// Reputation change for peers which send us a block with bad justifications. pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); - /// Reputation change when a peer sent us invlid ancestry result. 
+ /// Reputation change when a peer sent us invalid ancestry result. pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error"); /// Peer response data does not have requested bits. @@ -1334,7 +1334,7 @@ where PeerSyncState::DownloadingJustification(_) => { // Peers that were downloading justifications // should be kept in that state. - // We make sure our commmon number is at least something we have. + // We make sure our common number is at least something we have. trace!( target: LOG_TARGET, "Keeping peer {} after restart, updating common number from={} => to={} (our best).", diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index 127b6862f0e0dca6c01a88bc6ee83b1f57e7c4dc..cd955113542be598e18c94057d3ad056c95a6002 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -189,7 +189,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); } -/// Send a block annoucnement for the given `header`. +/// Send a block announcement for the given `header`. fn send_block_announce(header: Header, peer_id: PeerId, sync: &mut ChainSync) { let announce = BlockAnnounce { header: header.clone(), @@ -278,7 +278,7 @@ fn unwrap_from_block_number(from: FromBlock) -> u64 { /// announcement from this node in its sync process. Meaning our common number didn't change. It /// is now expected that we start an ancestor search to find the common number. #[test] -fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { +fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { sp_tracing::try_init_simple(); let blocks = { @@ -472,7 +472,7 @@ fn can_sync_huge_fork() { let actions = sync.take_actions().collect::>(); request = if actions.is_empty() { - // We found the ancenstor + // We found the ancestor break } else { assert_eq!(actions.len(), 1); @@ -607,7 +607,7 @@ fn syncs_fork_without_duplicate_requests() { let actions = sync.take_actions().collect::>(); request = if actions.is_empty() { - // We found the ancenstor + // We found the ancestor break } else { assert_eq!(actions.len(), 1); diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index 12d36ff9e01a9c16bfee482c5e9e641b13bb3b87..6d3b215f7f3210073c2ab61a2e57b0abf9652150 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -79,7 +79,7 @@ pub struct StateStrategy { state_sync: Box>, peers: HashMap>, actions: Vec>, - succeded: bool, + succeeded: bool, } impl StateStrategy { @@ -110,7 +110,7 @@ impl StateStrategy { )), peers, actions: Vec::new(), - succeded: false, + succeeded: false, } } @@ -129,7 +129,7 @@ impl StateStrategy { }) .collect(), actions: Vec::new(), - succeded: false, + succeeded: false, } } @@ -260,7 +260,7 @@ impl StateStrategy { "Failed to import target block with state: {e:?}." ); }); - self.succeded |= results.into_iter().any(|result| result.is_ok()); + self.succeeded |= results.into_iter().any(|result| result.is_ok()); self.actions.push(StateStrategyAction::Finished); } } @@ -342,10 +342,10 @@ impl StateStrategy { std::mem::take(&mut self.actions).into_iter() } - /// Check if state sync has succeded. + /// Check if state sync has succeeded. 
#[must_use] - pub fn is_succeded(&self) -> bool { - self.succeded + pub fn is_succeeded(&self) -> bool { + self.succeeded } } @@ -669,7 +669,7 @@ mod test { } #[test] - fn succesfully_importing_target_block_finishes_strategy() { + fn successfully_importing_target_block_finishes_strategy() { let target_hash = Hash::random(); let mut state_sync_provider = MockStateSync::::new(); state_sync_provider.expect_target_hash().return_const(target_hash); diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index 7935b5f29b68df1ac259e325e66522afd9f55e4c..c7b79228efeef37c5579462bc3fd9ba1978b78cb 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -968,7 +968,7 @@ mod test { warp_sync.on_warp_proof_response(&request_peer_id, EncodedProof(Vec::new())); - // We only interested in alredy generated actions, not new requests. + // We are only interested in already generated actions, not new requests. let actions = std::mem::take(&mut warp_sync.actions); assert_eq!(actions.len(), 1); assert!(matches!( diff --git a/substrate/client/network/sync/src/warp_request_handler.rs b/substrate/client/network/sync/src/warp_request_handler.rs index 39cf1c5d8067e154d0e414fc42069f1f7500b097..eda67cac95ffae2f423ba062308192371c1a2f20 100644 --- a/substrate/client/network/sync/src/warp_request_handler.rs +++ b/substrate/client/network/sync/src/warp_request_handler.rs @@ -57,7 +57,7 @@ pub fn generate_request_response_config>( } } -/// Generate the grandpa warp sync protocol name from the genesi hash and fork id. +/// Generate the grandpa warp sync protocol name from the genesis hash and fork id. fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { let genesis_hash = genesis_hash.as_ref(); if let Some(fork_id) = fork_id { diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index 4f57287a39cc8da10d3a30f769d7d584e16f0904..56fc89e1b2b95d9a5570ad4c833db01bb22f15c3 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -tokio = "1.22.0" -async-trait = "0.1.74" -futures = "0.3.21" +tokio = "1.37" +async-trait = "0.1.79" +futures = "0.3.30" futures-timer = "3.0.1" libp2p = "0.51.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs index c025a8262f0e53e15200749f30ba22696930c52f..f1c1b7414303202346a35aaf335d162997b504c3 100644 --- a/substrate/client/network/test/src/sync.rs +++ b/substrate/client/network/test/src/sync.rs @@ -749,7 +749,7 @@ async fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { } } -/// Waits for some time until the validation is successfull. +/// Waits for some time until the validation is successful.
struct DeferredBlockAnnounceValidator; impl BlockAnnounceValidator for DeferredBlockAnnounceValidator { diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index 01c8ac8814d6aff1448caa18bbe8f3287139553f..0ab7386ef21ffd7fb77a440b751330ac84bce75f 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" libp2p = "0.51.4" log = { workspace = true, default-features = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index caa4bb03f40cba37b10444af6820833f8b4a8b84..a3a3cfaa8fca1afb74a21c00bdf71abf11ff4cc7 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = "6.1" bytes = "1.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } fnv = "1.0.6" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" hyper = { version = "0.14.16", features = ["http2", "stream"] } hyper-rustls = { version = "0.24.0", features = ["http2"] } @@ -46,7 +46,7 @@ log = { workspace = true, default-features = true } [dev-dependencies] lazy_static = "1.4.0" -tokio = "1.22.0" +tokio = "1.37" sc-block-builder = { path = "../block-builder" } sc-client-db = { path = "../db", default-features = true } sc-transaction-pool = { path = "../transaction-pool" } diff --git a/substrate/client/offchain/src/api/http.rs b/substrate/client/offchain/src/api/http.rs index 7ca5e3fd13af788c7dee986202471a4ad2a9de6e..46f573341c57948539ef969f6f238923cd2fff22 100644 --- a/substrate/client/offchain/src/api/http.rs +++ b/substrate/client/offchain/src/api/http.rs @@ -604,7 +604,7 @@ enum WorkerToApi { /// because we don't want the `HttpApi` to have to drive the reading. /// Instead, reading an item from the channel will notify the worker task, which will push /// the next item. - /// Can also be used to send an error, in case an error happend on the HTTP socket. After + /// Can also be used to send an error, in case an error happened on the HTTP socket. After /// an error is sent, the channel will close. 
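The `WorkerToApi` comment corrected above encodes a small channel contract: each item on the body channel is a chunk-or-error, and after an error is sent the channel closes. A minimal model of that contract using `futures::channel::mpsc` (simplified types, not the real worker):

```rust
use futures::{channel::mpsc, SinkExt, StreamExt};

fn main() {
	// Each item is either a body chunk or the error that ended the transfer.
	let (mut tx, mut rx) = mpsc::channel::<Result<Vec<u8>, String>>(1);

	futures::executor::block_on(async move {
		tx.send(Ok(b"chunk".to_vec())).await.unwrap();
		tx.send(Err("error happened on the HTTP socket".into())).await.unwrap();
		drop(tx); // the worker closes the channel after reporting the error

		while let Some(item) = rx.next().await {
			match item {
				Ok(chunk) => println!("chunk: {} bytes", chunk.len()),
				Err(e) => println!("http error: {e}"),
			}
		}
		// `rx.next()` returned `None`: the channel is closed, as documented above.
	});
}
```

From the reader's side, a `None` after the `Err` item is what "the channel will close" means in practice.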
body: mpsc::Receiver>, }, diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index 1b7af6a4a52fe650dd325cc38d92daddd9c7a9fa..169714d22453d92276b02d949fa86e06862467b4 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 3adc81c57d594a355212a88f83682a0441bc7acc..bc21b5b1582f95165ff6d529361c85a413fb915e 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -25,5 +25,5 @@ tower-http = { version = "0.4.0", features = ["cors"] } tower = { version = "0.4.13", features = ["util"] } http = "0.2.8" hyper = "0.14.27" -futures = "0.3.29" +futures = "0.3.30" governor = "0.6.0" diff --git a/substrate/client/rpc-servers/src/middleware/metrics.rs b/substrate/client/rpc-servers/src/middleware/metrics.rs index 17849dc0c44846c10ed06b0f86d39783c5d29dc1..688c3c2a9fc42a8dddfcc2487b921ffca8616292 100644 --- a/substrate/client/rpc-servers/src/middleware/metrics.rs +++ b/substrate/client/rpc-servers/src/middleware/metrics.rs @@ -182,7 +182,7 @@ impl RpcMetrics { transport_label, req.method_name(), // the label "is_error", so `success` should be regarded as false - // and vice-versa to be registrered correctly. + // and vice-versa to be registered correctly. if rp.is_success() { "false" } else { "true" }, if is_rate_limited { "true" } else { "false" }, ]) diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index c62b3e789d389047f66004f5f76a6bb947ce8b9e..e2612d914542ecd74758fadaa9ac3dc4b01bc7c1 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -34,7 +34,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = { workspace = true } serde = { workspace = true, default-features = true } hex = "0.4" -futures = "0.3.21" +futures = "0.3.30" parking_lot = "0.12.1" tokio-stream = { version = "0.1.14", features = ["sync"] } tokio = { version = "1.22.0", features = ["sync"] } @@ -44,6 +44,7 @@ futures-util = { version = "0.3.30", default-features = false } rand = "0.8.5" [dev-dependencies] +jsonrpsee = { version = "0.22", features = ["server", "ws-client"] } serde_json = { workspace = true, default-features = true } tokio = { version = "1.22.0", features = ["macros"] } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 1803ffa3a3183628a8eff81a10c05efdddc5e962..de71ed82a128845d98ef5b6f96e0896de1a71ef1 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -435,7 +435,7 @@ async fn archive_storage_closest_merkle_value() { /// The core of this test. 
/// - /// Checks keys that are exact match, keys with descedant and keys that should not return + /// Checks keys that are exact match, keys with descendant and keys that should not return /// values. /// /// Returns (key, merkle value) pairs. @@ -459,7 +459,7 @@ async fn archive_storage_closest_merkle_value() { query_type: StorageQueryType::ClosestDescendantMerkleValue, pagination_start_key: None, }, - // Key with descedent. + // Key with descendant. PaginatedStorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 00000e1fb277bbc49aeb343d233b00d408fc45c4..3851adac2644d09b824383dba2e80d6877eff01a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -27,7 +27,7 @@ use crate::{ common::events::StorageQuery, }; use jsonrpsee::{proc_macros::rpc, server::ResponsePayload}; -use sp_rpc::list::ListOrValue; +pub use sp_rpc::list::ListOrValue; #[rpc(client, server)] pub trait ChainHeadApi { @@ -54,8 +54,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_body", blocking)] - fn chain_head_unstable_body( + #[method(name = "chainHead_unstable_body", raw_method)] + async fn chain_head_unstable_body( &self, follow_subscription: String, hash: Hash, @@ -73,8 +73,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_header", blocking)] - fn chain_head_unstable_header( + #[method(name = "chainHead_unstable_header", raw_method)] + async fn chain_head_unstable_header( &self, follow_subscription: String, hash: Hash, @@ -85,8 +85,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_storage", blocking)] - fn chain_head_unstable_storage( + #[method(name = "chainHead_unstable_storage", raw_method)] + async fn chain_head_unstable_storage( &self, follow_subscription: String, hash: Hash, @@ -99,8 +99,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_call", blocking)] - fn chain_head_unstable_call( + #[method(name = "chainHead_unstable_call", raw_method)] + async fn chain_head_unstable_call( &self, follow_subscription: String, hash: Hash, @@ -118,8 +118,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_unpin", blocking)] - fn chain_head_unstable_unpin( + #[method(name = "chainHead_unstable_unpin", raw_method)] + async fn chain_head_unstable_unpin( &self, follow_subscription: String, hash_or_hashes: ListOrValue, @@ -131,8 +131,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_continue", blocking)] - fn chain_head_unstable_continue( + #[method(name = "chainHead_unstable_continue", raw_method)] + async fn chain_head_unstable_continue( &self, follow_subscription: String, operation_id: String, @@ -145,8 +145,8 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[method(name = "chainHead_unstable_stopOperation", blocking)] - fn chain_head_unstable_stop_operation( + #[method(name = "chainHead_unstable_stopOperation", raw_method)] + async fn chain_head_unstable_stop_operation( &self, follow_subscription: String, operation_id: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 2bda22b452391e5b3ef3f7367b43331079591aa3..86d9a726d7bec371c157014adac0dfb1a926fb7e 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -34,10 +34,10 @@ use crate::{ hex_string, SubscriptionTaskExecutor, }; use codec::Encode; -use futures::future::FutureExt; +use futures::{channel::oneshot, future::FutureExt}; use jsonrpsee::{ - core::async_trait, server::ResponsePayload, types::SubscriptionId, MethodResponseFuture, - PendingSubscriptionSink, SubscriptionSink, + core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionDetails, + MethodResponseFuture, PendingSubscriptionSink, SubscriptionSink, }; use log::debug; use sc_client_api::{ @@ -62,9 +62,14 @@ pub struct ChainHeadConfig { pub subscription_max_pinned_duration: Duration, /// The maximum number of ongoing operations per subscription. pub subscription_max_ongoing_operations: usize, + /// Stop all subscriptions if the distance between the leaves and the current finalized + /// block is larger than this value. + pub max_lagging_distance: usize, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. pub operation_max_storage_items: usize, + /// The maximum number of `chainHead_follow` subscriptions per connection. + pub max_follow_subscriptions_per_connection: usize, } /// Maximum pinned blocks across all connections. @@ -86,13 +91,22 @@ const MAX_ONGOING_OPERATIONS: usize = 16; /// before paginations is required. const MAX_STORAGE_ITER_ITEMS: usize = 5; +/// Stop all subscriptions if the distance between the leaves and the current finalized +/// block is larger than this value. +const MAX_LAGGING_DISTANCE: usize = 128; + +/// The maximum number of `chainHead_follow` subscriptions per connection. +const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; + impl Default for ChainHeadConfig { fn default() -> Self { ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: MAX_PINNED_DURATION, subscription_max_ongoing_operations: MAX_ONGOING_OPERATIONS, + max_lagging_distance: MAX_LAGGING_DISTANCE, operation_max_storage_items: MAX_STORAGE_ITER_ITEMS, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, } } } @@ -106,10 +120,13 @@ pub struct ChainHead, Block: BlockT, Client> { /// Executor to spawn subscriptions. executor: SubscriptionTaskExecutor, /// Keep track of the pinned blocks for each subscription. - subscriptions: Arc>, + subscriptions: SubscriptionManagement, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. operation_max_storage_items: usize, + /// Stop all subscriptions if the distance between the leaves and the current finalized + /// block is larger than this value. + max_lagging_distance: usize, /// Phantom member to pin the block type. 
_phantom: PhantomData, } @@ -126,13 +143,15 @@ impl, Block: BlockT, Client> ChainHead { client, backend: backend.clone(), executor, - subscriptions: Arc::new(SubscriptionManagement::new( + subscriptions: SubscriptionManagement::new( config.global_max_pinned_blocks, config.subscription_max_pinned_duration, config.subscription_max_ongoing_operations, + config.max_follow_subscriptions_per_connection, backend, - )), + ), operation_max_storage_items: config.operation_max_storage_items, + max_lagging_distance: config.max_lagging_distance, _phantom: PhantomData, } } @@ -180,17 +199,29 @@ where let subscriptions = self.subscriptions.clone(); let backend = self.backend.clone(); let client = self.client.clone(); + let max_lagging_distance = self.max_lagging_distance; let fut = async move { + // Ensure the current connection ID has enough space to accept a new subscription. + let connection_id = pending.connection_id(); + // The RAII `reserved_subscription` will clean up resources on drop: + // - free the reserved subscription for the connection ID. + // - remove the subscription ID from the subscription management. + let Some(mut reserved_subscription) = subscriptions.reserve_subscription(connection_id) + else { + pending.reject(ChainHeadRpcError::ReachedLimits).await; + return + }; + let Ok(sink) = pending.accept().await else { return }; let sub_id = read_subscription_id_as_string(&sink); - // Keep track of the subscription. - let Some(sub_data) = subscriptions.insert_subscription(sub_id.clone(), with_runtime) + let Some(sub_data) = + reserved_subscription.insert_subscription(sub_id.clone(), with_runtime) else { - // Inserting the subscription can only fail if the JsonRPSee - // generated a duplicate subscription ID. + // Inserting the subscription can only fail if the JsonRPSee generated a duplicate + // subscription ID. debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription already accepted", sub_id); let msg = to_sub_message(&sink, &FollowEvent::::Stop); let _ = sink.send(msg).await; @@ -201,91 +232,121 @@ where let mut chain_head_follow = ChainHeadFollower::new( client, backend, - subscriptions.clone(), + subscriptions, with_runtime, sub_id.clone(), + max_lagging_distance, ); + let result = chain_head_follow.generate_events(sink, sub_data).await; + if let Err(SubscriptionManagementError::BlockDistanceTooLarge) = result { + debug!(target: LOG_TARGET, "[follow][id={:?}] All subscriptions are stopped", sub_id); + reserved_subscription.stop_all_subscriptions(); + } - chain_head_follow.generate_events(sink, sub_data).await; - - subscriptions.remove_subscription(&sub_id); debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription removed", sub_id); }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } - fn chain_head_unstable_body( + async fn chain_head_unstable_body( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, ) -> ResponsePayload<'static, MethodResponse> { - let mut block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { - Ok(block) => block, - Err(SubscriptionManagementError::SubscriptionAbsent) | - Err(SubscriptionManagementError::ExceededLimits) => - return ResponsePayload::success(MethodResponse::LimitReached), - Err(SubscriptionManagementError::BlockHashAbsent) => { - // Block is not part of the subscription. 
- return ResponsePayload::error(ChainHeadRpcError::InvalidBlock); - }, - Err(_) => return ResponsePayload::error(ChainHeadRpcError::InvalidBlock), - }; + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. + return ResponsePayload::success(MethodResponse::LimitReached); + } - let operation_id = block_guard.operation().operation_id(); + let client = self.client.clone(); + let subscriptions = self.subscriptions.clone(); + let executor = self.executor.clone(); + + let result = spawn_blocking(&self.executor, async move { + let mut block_guard = match subscriptions.lock_block(&follow_subscription, hash, 1) { + Ok(block) => block, + Err(SubscriptionManagementError::SubscriptionAbsent) | + Err(SubscriptionManagementError::ExceededLimits) => + return ResponsePayload::success(MethodResponse::LimitReached), + Err(SubscriptionManagementError::BlockHashAbsent) => { + // Block is not part of the subscription. + return ResponsePayload::error(ChainHeadRpcError::InvalidBlock); + }, + Err(_) => return ResponsePayload::error(ChainHeadRpcError::InvalidBlock), + }; - let event = match self.client.block(hash) { - Ok(Some(signed_block)) => { - let extrinsics = signed_block - .block - .extrinsics() - .iter() - .map(|extrinsic| hex_string(&extrinsic.encode())) - .collect(); - FollowEvent::::OperationBodyDone(OperationBodyDone { + let operation_id = block_guard.operation().operation_id(); + + let event = match client.block(hash) { + Ok(Some(signed_block)) => { + let extrinsics = signed_block + .block + .extrinsics() + .iter() + .map(|extrinsic| hex_string(&extrinsic.encode())) + .collect(); + FollowEvent::::OperationBodyDone(OperationBodyDone { + operation_id: operation_id.clone(), + value: extrinsics, + }) + }, + Ok(None) => { + // The block's body was pruned. This subscription ID has become invalid. + debug!( + target: LOG_TARGET, + "[body][id={:?}] Stopping subscription because hash={:?} was pruned", + &follow_subscription, + hash + ); + subscriptions.remove_subscription(&follow_subscription); + return ResponsePayload::error(ChainHeadRpcError::InvalidBlock) + }, + Err(error) => FollowEvent::::OperationError(OperationError { operation_id: operation_id.clone(), - value: extrinsics, - }) - }, - Ok(None) => { - // The block's body was pruned. This subscription ID has become invalid. - debug!( - target: LOG_TARGET, - "[body][id={:?}] Stopping subscription because hash={:?} was pruned", - &follow_subscription, - hash - ); - self.subscriptions.remove_subscription(&follow_subscription); - return ResponsePayload::error(ChainHeadRpcError::InvalidBlock) - }, - Err(error) => FollowEvent::::OperationError(OperationError { - operation_id: operation_id.clone(), - error: error.to_string(), - }), - }; + error: error.to_string(), + }), + }; - let (rp, rp_fut) = method_started_response(operation_id, None); + let (rp, rp_fut) = method_started_response(operation_id, None); + let fut = async move { + // Wait for the server to send out the response and if it produces an error no event + // should be generated. + if rp_fut.await.is_err() { + return; + } - let fut = async move { - // Events should only by generated - // if the response was successfully propagated. 
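The rewritten `chain_head_unstable_body` above moves the blocking client work onto `spawn_blocking` and gets the result back through a oneshot channel. A hedged, self-contained sketch of that wrapper, with a plain thread standing in for the subscription task executor:

```rust
use futures::channel::oneshot;

/// Run a future off the caller's thread and hand its result back on a oneshot
/// channel (a thread stands in for the executor used by the real code).
fn spawn_blocking<R, F>(fut: F) -> oneshot::Receiver<R>
where
	R: Send + 'static,
	F: std::future::Future<Output = R> + Send + 'static,
{
	let (tx, rx) = oneshot::channel();
	std::thread::spawn(move || {
		let result = futures::executor::block_on(fut);
		// Ignore the error: the caller may have given up waiting.
		let _ = tx.send(result);
	});
	rx
}

fn main() {
	let rx = spawn_blocking(async { 21 * 2 });
	let value = futures::executor::block_on(rx).unwrap();
	assert_eq!(value, 42);
}
```

A dropped sender surfaces as `Canceled` on the receiver, which is why the caller in this diff maps that case to `MethodResponse::LimitReached` instead of panicking.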
- if rp_fut.await.is_err() { - return; - } - let _ = block_guard.response_sender().unbounded_send(event); - }; + let _ = block_guard.response_sender().unbounded_send(event); + }; + executor.spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + rp + }); - rp + result + .await + .unwrap_or_else(|_| ResponsePayload::success(MethodResponse::LimitReached)) } - fn chain_head_unstable_header( + async fn chain_head_unstable_header( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, ) -> Result, ChainHeadRpcError> { - let _block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(None); + } + + let block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, Err(SubscriptionManagementError::SubscriptionAbsent) | Err(SubscriptionManagementError::ExceededLimits) => return Ok(None), @@ -296,19 +357,35 @@ where Err(_) => return Err(ChainHeadRpcError::InvalidBlock.into()), }; - self.client - .header(hash) - .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) - .map_err(|err| ChainHeadRpcError::InternalError(err.to_string())) + let client = self.client.clone(); + let result = spawn_blocking(&self.executor, async move { + let _block_guard = block_guard; + + client + .header(hash) + .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) + .map_err(|err| ChainHeadRpcError::InternalError(err.to_string())) + }); + result.await.unwrap_or_else(|_| Ok(None)) } - fn chain_head_unstable_storage( + async fn chain_head_unstable_storage( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, items: Vec>, child_trie: Option, ) -> ResponsePayload<'static, MethodResponse> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. + return ResponsePayload::success(MethodResponse::LimitReached); + } + // Gain control over parameter parsing and returned error. let items = match items .into_iter() @@ -357,25 +434,25 @@ where let mut items = items; items.truncate(num_operations); - let (rp, rp_is_success) = method_started_response(operation_id, Some(discarded)); - + let (rp, rp_fut) = method_started_response(operation_id, Some(discarded)); let fut = async move { - // Events should only by generated - // if the response was successfully propagated. - if rp_is_success.await.is_err() { + // Wait for the server to send out the response and if it produces an error no event + // should be generated. + if rp_fut.await.is_err() { return; } + storage_client.generate_events(block_guard, hash, items, child_trie).await; }; - self.executor .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); rp } - fn chain_head_unstable_call( + async fn chain_head_unstable_call( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash: Block::Hash, function: String, @@ -386,6 +463,15 @@ where Err(err) => return ResponsePayload::error(err), }; + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + // The spec says to return `LimitReached` if the follow subscription is invalid or + // stale. 
+ return ResponsePayload::success(MethodResponse::LimitReached); + } + let mut block_guard = match self.subscriptions.lock_block(&follow_subscription, hash, 1) { Ok(block) => block, Err(SubscriptionManagementError::SubscriptionAbsent) | @@ -408,44 +494,53 @@ where } let operation_id = block_guard.operation().operation_id(); - let event = self - .client - .executor() - .call(hash, &function, &call_parameters, CallContext::Offchain) - .map(|result| { - FollowEvent::::OperationCallDone(OperationCallDone { - operation_id: operation_id.clone(), - output: hex_string(&result), - }) - }) - .unwrap_or_else(|error| { - FollowEvent::::OperationError(OperationError { - operation_id: operation_id.clone(), - error: error.to_string(), - }) - }); - - let (rp, rp_fut) = method_started_response(operation_id, None); + let client = self.client.clone(); + let (rp, rp_fut) = method_started_response(operation_id.clone(), None); let fut = async move { - // Events should only by generated - // if the response was successfully propagated. + // Wait for the server to send out the response and if it produces an error no event + // should be generated. if rp_fut.await.is_err() { - return; + return } + + let event = client + .executor() + .call(hash, &function, &call_parameters, CallContext::Offchain) + .map(|result| { + FollowEvent::::OperationCallDone(OperationCallDone { + operation_id: operation_id.clone(), + output: hex_string(&result), + }) + }) + .unwrap_or_else(|error| { + FollowEvent::::OperationError(OperationError { + operation_id: operation_id.clone(), + error: error.to_string(), + }) + }); + let _ = block_guard.response_sender().unbounded_send(event); }; - - self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + self.executor + .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); rp } - fn chain_head_unstable_unpin( + async fn chain_head_unstable_unpin( &self, + connection_details: ConnectionDetails, follow_subscription: String, hash_or_hashes: ListOrValue, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()); + } + let result = match hash_or_hashes { ListOrValue::Value(hash) => self.subscriptions.unpin_blocks(&follow_subscription, [hash]), @@ -469,11 +564,19 @@ where } } - fn chain_head_unstable_continue( + async fn chain_head_unstable_continue( &self, + connection_details: ConnectionDetails, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()) + } + let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) else { return Ok(()) @@ -487,11 +590,19 @@ where } } - fn chain_head_unstable_stop_operation( + async fn chain_head_unstable_stop_operation( &self, + connection_details: ConnectionDetails, follow_subscription: String, operation_id: String, ) -> Result<(), ChainHeadRpcError> { + if !self + .subscriptions + .contains_subscription(connection_details.id(), &follow_subscription) + { + return Ok(()) + } + let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) else { return Ok(()) @@ -510,3 +621,26 @@ fn method_started_response( let rp = MethodResponse::Started(MethodResponseStarted { operation_id, discarded_items }); ResponsePayload::success(rp).notify_on_completion() } + +/// Spawn a blocking future on the provided executor and return the 
result on a oneshot channel. +/// +/// This is a wrapper to extract the result of a `executor.spawn_blocking` future. +fn spawn_blocking( + executor: &SubscriptionTaskExecutor, + fut: impl std::future::Future + Send + 'static, +) -> oneshot::Receiver +where + R: Send + 'static, +{ + let (tx, rx) = oneshot::channel(); + + let blocking_fut = async move { + let result = fut.await; + // Send the result back on the channel. + let _ = tx.send(result); + }; + + executor.spawn_blocking("substrate-rpc-subscription", Some("rpc"), blocking_fut.boxed()); + + rx +} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index afa99f3aa1648823aee646304c7c2b2cd71da6e2..0d87a45c07e295784bef4c946ef05d3641234ce4 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -41,12 +41,14 @@ use sp_api::CallApiAt; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, Info, }; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + SaturatedConversion, Saturating, +}; use std::{ collections::{HashSet, VecDeque}, sync::Arc, }; - /// The maximum number of finalized blocks provided by the /// `Initialized` event. const MAX_FINALIZED_BLOCKS: usize = 16; @@ -60,13 +62,16 @@ pub struct ChainHeadFollower, Block: BlockT, Client> { /// Backend of the chain. backend: Arc, /// Subscriptions handle. - sub_handle: Arc>, + sub_handle: SubscriptionManagement, /// Subscription was started with the runtime updates flag. with_runtime: bool, /// Subscription ID. sub_id: String, /// The best reported block by this subscription. best_block_cache: Option, + /// Stop all subscriptions if the distance between the leaves and the current finalized + /// block is larger than this value. + max_lagging_distance: usize, } impl, Block: BlockT, Client> ChainHeadFollower { @@ -74,11 +79,20 @@ impl, Block: BlockT, Client> ChainHeadFollower, backend: Arc, - sub_handle: Arc>, + sub_handle: SubscriptionManagement, with_runtime: bool, sub_id: String, + max_lagging_distance: usize, ) -> Self { - Self { client, backend, sub_handle, with_runtime, sub_id, best_block_cache: None } + Self { + client, + backend, + sub_handle, + with_runtime, + sub_id, + best_block_cache: None, + max_lagging_distance, + } } } @@ -186,6 +200,35 @@ where } } + /// Check the distance between the provided blocks does not exceed a + /// a reasonable range. + /// + /// When the blocks are too far apart (potentially millions of blocks): + /// - Tree route is expensive to calculate. + /// - The RPC layer will not be able to generate the `NewBlock` events for all blocks. + /// + /// This edge-case can happen for parachains where the relay chain syncs slower to + /// the head of the chain than the parachain node that is synced already. + fn distace_within_reason( + &self, + block: Block::Hash, + finalized: Block::Hash, + ) -> Result<(), SubscriptionManagementError> { + let Some(block_num) = self.client.number(block)? else { + return Err(SubscriptionManagementError::BlockHashAbsent) + }; + let Some(finalized_num) = self.client.number(finalized)? 
else { + return Err(SubscriptionManagementError::BlockHashAbsent) + }; + + let distance: usize = block_num.saturating_sub(finalized_num).saturated_into(); + if distance > self.max_lagging_distance { + return Err(SubscriptionManagementError::BlockDistanceTooLarge); + } + + Ok(()) + } + /// Get the in-memory blocks of the client, starting from the provided finalized hash. /// /// The reported blocks are pinned by this function. @@ -198,6 +241,13 @@ where let mut pruned_forks = HashSet::new(); let mut finalized_block_descendants = Vec::new(); let mut unique_descendants = HashSet::new(); + + // Ensure all leaves are within a reasonable distance from the finalized block, + // before traversing the tree. + for leaf in &leaves { + self.distace_within_reason(*leaf, finalized)?; + } + for leaf in leaves { let tree_route = sp_blockchain::tree_route(blockchain, finalized, leaf)?; @@ -542,11 +592,17 @@ where mut to_ignore: HashSet, sink: SubscriptionSink, rx_stop: oneshot::Receiver<()>, - ) where + ) -> Result<(), SubscriptionManagementError> + where EventStream: Stream> + Unpin, { let mut stream_item = stream.next(); - let mut stop_event = rx_stop; + + // The stop event can be triggered by the chainHead logic when the pinned + // block guarantee cannot be hold. Or when the client is disconnected. + let connection_closed = sink.closed(); + tokio::pin!(connection_closed); + let mut stop_event = futures_util::future::select(rx_stop, connection_closed); while let Either::Left((Some(event), next_stop_event)) = futures_util::future::select(stream_item, stop_event).await @@ -571,7 +627,7 @@ where ); let msg = to_sub_message(&sink, &FollowEvent::::Stop); let _ = sink.send(msg).await; - return + return Err(err) }, }; @@ -586,7 +642,8 @@ where let msg = to_sub_message(&sink, &FollowEvent::::Stop); let _ = sink.send(msg).await; - return + // No need to propagate this error further, the client disconnected. + return Ok(()) } } @@ -594,10 +651,13 @@ where stop_event = next_stop_event; } - // If we got here either the substrate streams have closed - // or the `Stop` receiver was triggered. + // If we got here either: + // - the substrate streams have closed + // - the `Stop` receiver was triggered internally (cannot hold the pinned block guarantee) + // - the client disconnected. let msg = to_sub_message(&sink, &FollowEvent::::Stop); let _ = sink.send(msg).await; + Ok(()) } /// Generate the block events for the `chainHead_follow` method. @@ -605,7 +665,7 @@ where &mut self, sink: SubscriptionSink, sub_data: InsertedSubscriptionData, - ) { + ) -> Result<(), SubscriptionManagementError> { // Register for the new block and finalized notifications. let stream_import = self .client @@ -633,7 +693,7 @@ where ); let msg = to_sub_message(&sink, &FollowEvent::::Stop); let _ = sink.send(msg).await; - return + return Err(err) }, }; @@ -643,6 +703,6 @@ where let stream = stream::once(futures::future::ready(initial)).chain(merged); self.submit_events(&startup_point, stream.boxed(), pruned_forks, sink, sub_data.rx_stop) - .await; + .await } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/error.rs b/substrate/client/rpc-spec-v2/src/chain_head/error.rs index 8c50e445aa0cf6480d3a672f7120cd7a492f4c99..35604db0660091b3bc8ea7053e3ccd5391f9a32a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/error.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/error.rs @@ -23,6 +23,9 @@ use jsonrpsee::types::error::ErrorObject; /// ChainHead RPC errors. 
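The leaves loop above is the whole point of `max_lagging_distance`: before any tree route is computed, every leaf must sit within a bounded distance of the finalized block. A self-contained model of the guard, with plain integers in place of block numbers:

```rust
#[derive(Debug, PartialEq)]
enum SubscriptionManagementError {
	BlockDistanceTooLarge,
}

/// Model of the guard: a leaf further than `max_lagging_distance` past the
/// finalized block aborts the subscription instead of paying for a tree route.
fn distance_within_reason(
	leaf_number: u64,
	finalized_number: u64,
	max_lagging_distance: usize,
) -> Result<(), SubscriptionManagementError> {
	let distance = leaf_number.saturating_sub(finalized_number) as usize;
	if distance > max_lagging_distance {
		return Err(SubscriptionManagementError::BlockDistanceTooLarge)
	}
	Ok(())
}

fn main() {
	// The edge case described above: a node already at the head of the chain
	// while the finalized view lags far behind, potentially by millions of blocks.
	assert_eq!(
		distance_within_reason(1_000_000, 10, 128),
		Err(SubscriptionManagementError::BlockDistanceTooLarge)
	);
	// Within the default distance of 128 the subscription proceeds normally.
	assert!(distance_within_reason(138, 10, 128).is_ok());
}
```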
#[derive(Debug, thiserror::Error)] pub enum Error { + /// Maximum number of chainHead_follow has been reached. + #[error("Maximum number of chainHead_follow has been reached")] + ReachedLimits, /// The provided block hash is invalid. #[error("Invalid block hash")] InvalidBlock, @@ -46,6 +49,8 @@ pub enum Error { /// Errors for `chainHead` RPC module, as defined in /// . pub mod rpc_spec_v2 { + /// Maximum number of chainHead_follow has been reached. + pub const REACHED_LIMITS: i32 = -32800; /// The provided block hash is invalid. pub const INVALID_BLOCK_ERROR: i32 = -32801; /// The follow subscription was started with `withRuntime` set to `false`. @@ -70,6 +75,8 @@ impl From for ErrorObject<'static> { let msg = e.to_string(); match e { + Error::ReachedLimits => + ErrorObject::owned(rpc_spec_v2::REACHED_LIMITS, msg, None::<()>), Error::InvalidBlock => ErrorObject::owned(rpc_spec_v2::INVALID_BLOCK_ERROR, msg, None::<()>), Error::InvalidRuntimeCall(_) => diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index e0c804d16eb17853876a8b4bfd38d42d4b506f49..bd9863060910542b1fdd2785d69fc9bb3472879b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -111,7 +111,7 @@ impl From for RuntimeEvent { #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Initialized { - /// The hash of the lastest finalized blocks. + /// The hash of the latest finalized blocks. pub finalized_block_hashes: Vec, /// The runtime version of the finalized block. /// @@ -315,7 +315,7 @@ pub enum FollowEvent { Stop, } -/// The method respose of `chainHead_body`, `chainHead_call` and `chainHead_storage`. +/// The method response of `chainHead_body`, `chainHead_call` and `chainHead_storage`. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(tag = "result")] diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs index 2c22e51ca4dc70102fca98f4eea7f66d8e044a02..91ce26db22a5f1291e86a6159c33761af4dec82f 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/error.rs @@ -41,6 +41,9 @@ pub enum SubscriptionManagementError { /// The unpin method was called with duplicate hashes. #[error("Duplicate hashes")] DuplicateHashes, + /// The distance between the leaves and the current finalized block is too large. + #[error("Distance too large")] + BlockDistanceTooLarge, /// Custom error. 
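The new `ReachedLimits` variant slots into the rpc-spec-v2 reserved error-code range alongside the existing codes. A trivial self-contained model of the mapping, using only the two codes visible in this diff:

```rust
/// Stand-in for the chainHead error enum; only two variants are modelled.
#[derive(Debug)]
enum Error {
	ReachedLimits,
	InvalidBlock,
}

/// Mirrors the `From<Error> for ErrorObject` mapping: each variant owns one
/// JSON-RPC error code from the spec's reserved range.
fn error_code(e: &Error) -> i32 {
	match e {
		Error::ReachedLimits => -32800, // rpc_spec_v2::REACHED_LIMITS
		Error::InvalidBlock => -32801,  // rpc_spec_v2::INVALID_BLOCK_ERROR
	}
}

fn main() {
	assert_eq!(error_code(&Error::ReachedLimits), -32800);
	assert_eq!(error_code(&Error::InvalidBlock), -32801);
}
```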
#[error("Subscription error {0}")] Custom(String), @@ -57,6 +60,7 @@ impl PartialEq for SubscriptionManagementError { (Self::BlockHeaderAbsent, Self::BlockHeaderAbsent) | (Self::SubscriptionAbsent, Self::SubscriptionAbsent) | (Self::DuplicateHashes, Self::DuplicateHashes) => true, + (Self::BlockDistanceTooLarge, Self::BlockDistanceTooLarge) => true, (Self::Custom(lhs), Self::Custom(rhs)) => lhs == rhs, _ => false, } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index d2879679501fd17eada1b826ff01478cce24b7a3..0e5ccb91d39a61a76a850119590699d563d88df8 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -560,6 +560,7 @@ pub struct SubscriptionsInner> { max_ongoing_operations: usize, /// Map the subscription ID to internal details of the subscription. subs: HashMap>, + /// Backend pinning / unpinning blocks. /// /// The `Arc` is handled one level-above, but substrate exposes the backend as Arc. @@ -623,6 +624,15 @@ impl> SubscriptionsInner { } } + /// All active subscriptions are removed. + pub fn stop_all_subscriptions(&mut self) { + let to_remove: Vec<_> = self.subs.keys().map(|sub_id| sub_id.clone()).collect(); + + for sub_id in to_remove { + self.remove_subscription(&sub_id); + } + } + /// Ensure that a new block could be pinned. /// /// If the global number of blocks has been reached this method @@ -878,6 +888,30 @@ mod tests { (backend, client) } + fn produce_blocks( + mut client: Arc>>, + num_blocks: usize, + ) -> Vec<::Hash> { + let mut blocks = Vec::with_capacity(num_blocks); + let mut parent_hash = client.chain_info().genesis_hash; + + for i in 0..num_blocks { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(i as u64) + .build() + .unwrap() + .build() + .unwrap() + .block; + parent_hash = block.header.hash(); + futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + blocks.push(block.header.hash()); + } + + blocks + } + #[test] fn block_state_machine_register_unpin() { let mut state = BlockStateMachine::new(); @@ -1003,37 +1037,10 @@ mod tests { #[test] fn unpin_duplicate_hashes() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1102,18 +1109,10 @@ mod tests { #[test] fn 
subscription_check_block() { - let (backend, mut client) = init_backend(); - - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash = block.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 1); + let hash = hashes[0]; let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1140,17 +1139,10 @@ mod tests { #[test] fn subscription_ref_count() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 1); + let hash = hashes[0]; let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1190,37 +1182,10 @@ mod tests { #[test] fn subscription_remove_subscription() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); let mut subs = SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); @@ -1256,37 +1221,10 @@ mod tests { #[test] fn subscription_check_limits() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = 
init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); // Maximum number of pinned blocks is 2. let mut subs = @@ -1328,37 +1266,10 @@ mod tests { #[test] fn subscription_check_limits_with_duration() { - let (backend, mut client) = init_backend(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_1 = block.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_1) - .with_parent_block_number(1) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_2 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); - let block = BlockBuilderBuilder::new(&*client) - .on_parent_block(hash_2) - .with_parent_block_number(2) - .build() - .unwrap() - .build() - .unwrap() - .block; - let hash_3 = block.header.hash(); - futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); // Maximum number of pinned blocks is 2 and maximum pin duration is 5 second. let mut subs = @@ -1455,4 +1366,90 @@ mod tests { let permit_three = ops.reserve_at_most(1).unwrap(); assert_eq!(permit_three.num_ops, 1); } + + #[test] + fn stop_all_subscriptions() { + let (backend, client) = init_backend(); + + let hashes = produce_blocks(client, 3); + let (hash_1, hash_2, hash_3) = (hashes[0], hashes[1], hashes[2]); + + let mut subs = + SubscriptionsInner::new(10, Duration::from_secs(10), MAX_OPERATIONS_PER_SUB, backend); + let id_1 = "abc".to_string(); + let id_2 = "abcd".to_string(); + + // Pin all blocks for the first subscription. + let _stop = subs.insert_subscription(id_1.clone(), true).unwrap(); + assert_eq!(subs.pin_block(&id_1, hash_1).unwrap(), true); + assert_eq!(subs.pin_block(&id_1, hash_2).unwrap(), true); + assert_eq!(subs.pin_block(&id_1, hash_3).unwrap(), true); + + // Pin only block 2 for the second subscription. + let _stop = subs.insert_subscription(id_2.clone(), true).unwrap(); + assert_eq!(subs.pin_block(&id_2, hash_2).unwrap(), true); + + // Check reference count. + assert_eq!(*subs.global_blocks.get(&hash_1).unwrap(), 1); + assert_eq!(*subs.global_blocks.get(&hash_2).unwrap(), 2); + assert_eq!(*subs.global_blocks.get(&hash_3).unwrap(), 1); + assert_eq!(subs.global_blocks.len(), 3); + + // Stop all active subscriptions. + subs.stop_all_subscriptions(); + assert!(subs.global_blocks.is_empty()); + } + + #[test] + fn reserved_subscription_cleans_resources() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let subs = Arc::new(parking_lot::RwLock::new(SubscriptionsInner::new( + 10, + Duration::from_secs(10), + MAX_OPERATIONS_PER_SUB, + backend, + ))); + + // Maximum 2 subscriptions per connection. 
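What `stop_all_subscriptions` must preserve is the global reference count per pinned block, which the test above asserts. A toy, self-contained model of that bookkeeping, with plain strings standing in for block hashes:

```rust
use std::collections::HashMap;

/// Removing a subscription decrements the count of each block it pinned;
/// blocks whose count reaches zero leave the global map entirely.
fn remove_subscription(global: &mut HashMap<String, usize>, pinned: &[&str]) {
	for hash in pinned {
		if let Some(count) = global.get_mut(*hash) {
			*count -= 1;
			if *count == 0 {
				global.remove(*hash);
			}
		}
	}
}

fn main() {
	let mut global: HashMap<String, usize> = HashMap::new();
	for hash in ["hash_1", "hash_2", "hash_3"] {
		*global.entry(hash.into()).or_default() += 1; // sub 1 pins all three
	}
	*global.entry("hash_2".into()).or_default() += 1; // sub 2 pins only block 2
	assert_eq!(global["hash_2"], 2);

	remove_subscription(&mut global, &["hash_1", "hash_2", "hash_3"]); // stop sub 1
	remove_subscription(&mut global, &["hash_2"]); // stop sub 2
	assert!(global.is_empty());
}
```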
+ let rpc_connections = crate::common::connections::RpcConnections::new(2); + + let subscription_management = + crate::chain_head::subscription::SubscriptionManagement::_from_inner( + subs.clone(), + rpc_connections.clone(), + ); + + let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + let mut reserved_sub_second = subscription_management.reserve_subscription(1).unwrap(); + // Subscriptions reserved but not yet populated. + assert_eq!(subs.read().subs.len(), 0); + + // Cannot reserve anymore. + assert!(subscription_management.reserve_subscription(1).is_none()); + // Drop the first subscription. + drop(reserved_sub_first); + // Space is freed-up for the rpc connections. + let mut reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + + // Insert subscriptions. + let _sub_data_first = + reserved_sub_first.insert_subscription("sub1".to_string(), true).unwrap(); + let _sub_data_second = + reserved_sub_second.insert_subscription("sub2".to_string(), true).unwrap(); + // Check we have 2 subscriptions under management. + assert_eq!(subs.read().subs.len(), 2); + + // Drop first reserved subscription. + drop(reserved_sub_first); + // Check that the subscription is removed. + assert_eq!(subs.read().subs.len(), 1); + // Space is freed-up for the rpc connections. + let reserved_sub_first = subscription_management.reserve_subscription(1).unwrap(); + + // Drop all subscriptions. + drop(reserved_sub_first); + drop(reserved_sub_second); + assert_eq!(subs.read().subs.len(), 0); + } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs index c830e662da2e5c3499f86ae1ddc40bd3b6f4e40a..f266c9d8b34fc774e04a5825740204913851db82 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs @@ -16,6 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use jsonrpsee::ConnectionId; use parking_lot::RwLock; use sc_client_api::Backend; use sp_runtime::traits::Block as BlockT; @@ -24,6 +25,11 @@ use std::{sync::Arc, time::Duration}; mod error; mod inner; +use crate::{ + chain_head::chain_head::LOG_TARGET, + common::connections::{RegisteredConnection, ReservedConnection, RpcConnections}, +}; + use self::inner::SubscriptionsInner; pub use self::inner::OperationState; @@ -34,7 +40,22 @@ pub use inner::{BlockGuard, InsertedSubscriptionData}; pub struct SubscriptionManagement> { /// Manage subscription by mapping the subscription ID /// to a set of block hashes. - inner: RwLock>, + inner: Arc>>, + + /// Ensures that chainHead methods can be called from a single connection context. + /// + /// For example, `chainHead_storage` cannot be called with a subscription ID that + /// was obtained from a different connection. 
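The `RpcConnections` doc above states the invariant this whole change enforces: a subscription ID is only honoured on the connection that created it. A toy model of that check, with integer connection IDs rather than the jsonrpsee type:

```rust
use std::collections::{HashMap, HashSet};

/// Maps each connection ID to the subscription IDs it created.
struct Connections {
	subs: HashMap<u32, HashSet<String>>,
}

impl Connections {
	/// A subscription is only valid on the connection that registered it.
	fn contains_subscription(&self, connection_id: u32, sub_id: &str) -> bool {
		self.subs.get(&connection_id).map_or(false, |s| s.contains(sub_id))
	}
}

fn main() {
	let mut subs = HashMap::new();
	subs.insert(1u32, HashSet::from(["sub-abc".to_string()]));
	let connections = Connections { subs };

	assert!(connections.contains_subscription(1, "sub-abc"));
	// A subscription ID obtained on connection 1 is rejected on connection 2,
	// e.g. `chainHead_storage` called with a foreign follow subscription.
	assert!(!connections.contains_subscription(2, "sub-abc"));
}
```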
+	rpc_connections: RpcConnections,
+}
+
+impl<Block: BlockT, BE: Backend<Block>> Clone for SubscriptionManagement<Block, BE> {
+	fn clone(&self) -> Self {
+		SubscriptionManagement {
+			inner: self.inner.clone(),
+			rpc_connections: self.rpc_connections.clone(),
+		}
+	}
 }
 
 impl<Block: BlockT, BE: Backend<Block>> SubscriptionManagement<Block, BE> {
@@ -43,30 +64,55 @@ impl<Block: BlockT, BE: Backend<Block>> SubscriptionManagement<Block, BE> {
 		global_max_pinned_blocks: usize,
 		local_max_pin_duration: Duration,
 		max_ongoing_operations: usize,
+		max_follow_subscriptions_per_connection: usize,
 		backend: Arc<BE>,
 	) -> Self {
 		SubscriptionManagement {
-			inner: RwLock::new(SubscriptionsInner::new(
+			inner: Arc::new(RwLock::new(SubscriptionsInner::new(
 				global_max_pinned_blocks,
 				local_max_pin_duration,
 				max_ongoing_operations,
 				backend,
-			)),
+			))),
+			rpc_connections: RpcConnections::new(max_follow_subscriptions_per_connection),
 		}
 	}
 
-	/// Insert a new subscription ID.
+	/// Create a new instance from the inner state.
 	///
-	/// If the subscription was not previously inserted, returns the receiver that is
-	/// triggered upon the "Stop" event. Otherwise, if the subscription ID was already
-	/// inserted returns none.
-	pub fn insert_subscription(
+	/// # Note
+	///
+	/// Used for testing.
+	#[cfg(test)]
+	pub(crate) fn _from_inner(
+		inner: Arc<RwLock<SubscriptionsInner<Block, BE>>>,
+		rpc_connections: RpcConnections,
+	) -> Self {
+		SubscriptionManagement { inner, rpc_connections }
+	}
+
+	/// Reserve space for a subscription.
+	///
+	/// Fails if the connection ID has reached the maximum number of active subscriptions.
+	pub fn reserve_subscription(
 		&self,
-		sub_id: String,
-		runtime_updates: bool,
-	) -> Option<InsertedSubscriptionData<Block>> {
-		let mut inner = self.inner.write();
-		inner.insert_subscription(sub_id, runtime_updates)
+		connection_id: ConnectionId,
+	) -> Option<ReservedSubscription<Block, BE>> {
+		let reserved_token = self.rpc_connections.reserve_space(connection_id)?;
+
+		Some(ReservedSubscription {
+			state: ConnectionState::Reserved(reserved_token),
+			inner: self.inner.clone(),
+		})
+	}
+
+	/// Check if the given connection contains the given subscription.
+	pub fn contains_subscription(
+		&self,
+		connection_id: ConnectionId,
+		subscription_id: &str,
+	) -> bool {
+		self.rpc_connections.contains_identifier(connection_id, subscription_id)
 	}
 
 	/// Remove the subscription ID with associated pinned blocks.
@@ -136,3 +182,72 @@ impl<Block: BlockT, BE: Backend<Block>> SubscriptionManagement<Block, BE> {
 		inner.get_operation(sub_id, operation_id)
 	}
 }
+
+/// The state of the connection.
+///
+/// The state starts in [`ConnectionState::Reserved`] and then transitions to
+/// [`ConnectionState::Registered`] when the subscription is inserted.
+enum ConnectionState {
+	Reserved(ReservedConnection),
+	Registered { _unregister_on_drop: RegisteredConnection, sub_id: String },
+	Empty,
+}
+
+/// RAII wrapper that removes the subscription from internal mappings and
+/// gives back the reserved space for the connection.
+pub struct ReservedSubscription<Block: BlockT, BE: Backend<Block>> {
+	state: ConnectionState,
+	inner: Arc<RwLock<SubscriptionsInner<Block, BE>>>,
+}
+
+impl<Block: BlockT, BE: Backend<Block>> ReservedSubscription<Block, BE> {
+	/// Insert a new subscription ID.
+	///
+	/// If the subscription was not previously inserted, returns the receiver that is
+	/// triggered upon the "Stop" event. Otherwise, if the subscription ID was already
+	/// inserted, returns `None`.
+	///
+	/// # Note
+	///
+	/// This method should be called only once.
+ pub fn insert_subscription( + &mut self, + sub_id: String, + runtime_updates: bool, + ) -> Option> { + match std::mem::replace(&mut self.state, ConnectionState::Empty) { + ConnectionState::Reserved(reserved) => { + let registered_token = reserved.register(sub_id.clone())?; + self.state = ConnectionState::Registered { + _unregister_on_drop: registered_token, + sub_id: sub_id.clone(), + }; + + let mut inner = self.inner.write(); + inner.insert_subscription(sub_id, runtime_updates) + }, + // Cannot insert multiple subscriptions into one single reserved space. + ConnectionState::Registered { .. } | ConnectionState::Empty => { + log::error!(target: LOG_TARGET, "Called insert_subscription on a connection that is not reserved"); + None + }, + } + } + + /// Stop all active subscriptions. + /// + /// For all active subscriptions, the internal data is discarded, blocks are unpinned and the + /// `Stop` event will be generated. + pub fn stop_all_subscriptions(&self) { + let mut inner = self.inner.write(); + inner.stop_all_subscriptions() + } +} + +impl> Drop for ReservedSubscription { + fn drop(&mut self) { + if let ConnectionState::Registered { sub_id, .. } = &self.state { + self.inner.write().remove_subscription(sub_id); + } + } +} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 4d9dfb24e0a93bc45669ff1af4899eeb27694c18..c2bff7c50d5ebd951596f15e9e7423a77b889c0b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::{ - chain_head::{event::MethodResponse, test_utils::ChainHeadMockClient}, + chain_head::{api::ChainHeadApiClient, event::MethodResponse, test_utils::ChainHeadMockClient}, common::events::{StorageQuery, StorageQueryType, StorageResultType}, hex_string, }; @@ -27,8 +27,12 @@ use assert_matches::assert_matches; use codec::{Decode, Encode}; use futures::Future; use jsonrpsee::{ - core::server::Subscription as RpcSubscription, rpc_params, MethodsError as Error, RpcModule, + core::{ + client::Subscription as RpcClientSubscription, server::Subscription as RpcSubscription, + }, + rpc_params, MethodsError as Error, RpcModule, }; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_service::client::new_in_mem; @@ -59,6 +63,9 @@ const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; const MAX_PAGINATION_LIMIT: usize = 5; +const MAX_LAGGING_DISTANCE: usize = 128; +const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; + const INVALID_HASH: [u8; 32] = [1; 32]; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; @@ -66,6 +73,36 @@ const CHILD_STORAGE_KEY: &[u8] = b"child"; const CHILD_VALUE: &[u8] = b"child value"; const DOES_NOT_PRODUCE_EVENTS_SECONDS: u64 = 10; +/// Start an RPC server with the chainHead module. 
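The `Drop` impl above is what makes `ReservedSubscription` safe to use with early returns: whether or not `insert_subscription` ever ran, dropping the guard frees the per-connection slot. A self-contained sketch of the same RAII pattern, with a simple counter in place of `RpcConnections`:

```rust
use std::sync::{Arc, Mutex};

/// Stand-in for the per-connection accounting done by `RpcConnections`.
struct Slots {
	active: usize,
	limit: usize,
}

/// RAII guard: holding it keeps one slot occupied; dropping it frees the slot.
struct Reservation {
	slots: Arc<Mutex<Slots>>,
}

fn reserve(slots: &Arc<Mutex<Slots>>) -> Option<Reservation> {
	let mut guard = slots.lock().unwrap();
	if guard.active >= guard.limit {
		return None // the connection is at its subscription limit
	}
	guard.active += 1;
	Some(Reservation { slots: slots.clone() })
}

impl Drop for Reservation {
	fn drop(&mut self) {
		// Runs on every exit path, even if the subscription was never registered.
		self.slots.lock().unwrap().active -= 1;
	}
}

fn main() {
	let slots = Arc::new(Mutex::new(Slots { active: 0, limit: 1 }));
	let first = reserve(&slots).expect("one slot is free");
	assert!(reserve(&slots).is_none()); // limit reached
	drop(first);
	assert!(reserve(&slots).is_some()); // the drop freed the slot
}
```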
+pub async fn run_server() -> std::net::SocketAddr { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + let api = ChainHead::new( + client, + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: 1, + max_lagging_distance: MAX_LAGGING_DISTANCE, + }, + ) + .into_rpc(); + + let server = jsonrpsee::server::ServerBuilder::default().build("127.0.0.1:0").await.unwrap(); + + let addr = server.local_addr().unwrap(); + let handle = server.start(api); + + tokio::spawn(handle.stopped()); + addr +} + async fn get_next_event(sub: &mut RpcSubscription) -> T { let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) .await @@ -113,6 +150,8 @@ async fn setup_api() -> ( subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -163,6 +202,9 @@ async fn follow_subscription_produces_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -231,6 +273,9 @@ async fn follow_with_runtime() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -543,6 +588,9 @@ async fn call_runtime_without_flag() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1201,6 +1249,9 @@ async fn separate_operation_ids_for_subscriptions() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1289,6 +1340,9 @@ async fn follow_generates_initial_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1444,6 +1498,9 @@ async fn follow_exceeding_pinned_blocks() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 
MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1520,6 +1577,9 @@ async fn follow_with_unpin() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1631,6 +1691,9 @@ async fn unpin_duplicate_hashes() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1733,6 +1796,9 @@ async fn follow_with_multiple_unpin_hashes() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -1886,6 +1952,9 @@ async fn follow_prune_best_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2071,6 +2140,9 @@ async fn follow_forks_pruned_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2230,6 +2302,9 @@ async fn follow_report_multiple_pruned_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2475,6 +2550,9 @@ async fn pin_block_references() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2612,6 +2690,9 @@ async fn follow_finalized_before_new_block() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2726,6 +2807,9 @@ async fn ensure_operation_limits_works() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: 1, operation_max_storage_items: MAX_PAGINATION_LIMIT, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + 
max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -2786,7 +2870,7 @@ async fn ensure_operation_limits_works() { FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id ); - // The storage is finished and capactiy must be released. + // The storage is finished and capacity must be released. let alice_id = AccountKeyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); @@ -2830,6 +2914,9 @@ async fn check_continue_operation() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 1, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -3012,6 +3099,9 @@ async fn stop_storage_operation() { subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, operation_max_storage_items: 1, + + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, }, ) .into_rpc(); @@ -3113,7 +3203,7 @@ async fn storage_closest_merkle_value() { /// The core of this test. /// - /// Checks keys that are exact match, keys with descedant and keys that should not return + /// Checks keys that are exact match, keys with descendant and keys that should not return /// values. /// /// Returns (key, merkle value) pairs. @@ -3139,7 +3229,7 @@ async fn storage_closest_merkle_value() { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue }, - // Key with descedent. + // Key with descendant. StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue @@ -3297,3 +3387,260 @@ async fn storage_closest_merkle_value() { merkle_values_rhs.get(&hex_string(b":AAAA")).unwrap() ); } + +#[tokio::test] +async fn chain_head_stop_all_subscriptions() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + // Configure the chainHead to stop all subscriptions on lagging distance of 5 blocks. + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: 5, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); + + // Import 6 blocks in total to trigger the suspension distance. 
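+ // With `max_lagging_distance: 5` configured above and finalization left at
+ // genesis, the sixth imported block should push the distance between the best
+ // and the finalized block past the limit, which is what forces the `Stop`
+ // events asserted below.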
+ let mut parent_hash = client.chain_info().genesis_hash; + for i in 0..6 { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(i) + .build() + .unwrap() + .build() + .unwrap() + .block; + + let hash = block.hash(); + parent_hash = hash; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + } + + let mut second_sub = + api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Lagging detected, the stop event is delivered immediately. + assert_matches!( + get_next_event::>(&mut second_sub).await, + FollowEvent::Stop + ); + + // Ensure that all subscriptions are stopped. + assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); + + // Other subscriptions cannot be started until the suspension period is over. + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Should receive the stop event immediately. + assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); + + // For the next subscription, lagging distance must be smaller. + client.finalize_block(parent_hash, None).unwrap(); + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); +} + +#[tokio::test] +async fn chain_head_single_connection_context() { + let server_addr = run_server().await; + let server_url = format!("ws://{}", server_addr); + let client = jsonrpsee::ws_client::WsClientBuilder::default() + .build(&server_url) + .await + .unwrap(); + // Calls cannot be made from a different connection context. + let second_client = jsonrpsee::ws_client::WsClientBuilder::default() + .build(&server_url) + .await + .unwrap(); + + let mut sub: RpcClientSubscription> = + ChainHeadApiClient::::chain_head_unstable_follow(&client, true) + .await + .unwrap(); + + let event = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) + .await + .unwrap() + .unwrap() + .unwrap(); + let finalized_hash = match event { + FollowEvent::Initialized(init) => init.finalized_block_hashes.into_iter().last().unwrap(), + _ => panic!("Expected FollowEvent::Initialized"), + }; + + let first_sub_id = match sub.kind() { + jsonrpsee::core::client::SubscriptionKind::Subscription(id) => match id { + jsonrpsee::types::SubscriptionId::Num(num) => num.to_string(), + jsonrpsee::types::SubscriptionId::Str(s) => s.to_string(), + }, + _ => panic!("Unexpected subscription ID"), + }; + + // Trying to unpin from a different connection will have no effect. + let _response = ChainHeadApiClient::::chain_head_unstable_unpin( + &second_client, + first_sub_id.clone(), + crate::chain_head::api::ListOrValue::Value(finalized_hash.clone()), + ) + .await + .unwrap(); + + // Body can still be fetched from the first subscription. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_body( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::Started(_started)); + + // Cannot make a call from a different connection context. 
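+ // The server remembers which connection registered `first_sub_id`, so the
+ // same subscription ID presented over `second_client` should be rejected.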
+ let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_body( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); + + let response: Option = ChainHeadApiClient::::chain_head_unstable_header( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert!(response.is_some()); + // Cannot make a call from a different connection context. + let response: Option = ChainHeadApiClient::::chain_head_unstable_header( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + ) + .await + .unwrap(); + assert!(response.is_none()); + + let key = hex_string(&KEY); + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_storage( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + None, + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::Started(_started)); + // Cannot make a call from a different connection context. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_storage( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + None, + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); + + let alice_id = AccountKeyring::Alice.to_account_id(); + // Hex encoded scale encoded bytes representing the call parameters. + let call_parameters = hex_string(&alice_id.encode()); + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call( + &client, + first_sub_id.clone(), + finalized_hash.clone(), + "AccountNonceApi_account_nonce".into(), + call_parameters.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::Started(_started)); + // Cannot make a call from a different connection context. + let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call( + &second_client, + first_sub_id.clone(), + finalized_hash.clone(), + "AccountNonceApi_account_nonce".into(), + call_parameters.clone(), + ) + .await + .unwrap(); + assert_matches!(response, MethodResponse::LimitReached); +} + +#[tokio::test] +async fn chain_head_limit_reached() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + // Maximum of 1 chainHead_follow subscription. + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: 1, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + + // Initialized must always be reported first. + let _event: FollowEvent = get_next_event(&mut sub).await; + + let error = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap_err(); + assert!(error + .to_string() + .contains("Maximum number of chainHead_follow has been reached")); + + // After dropping the subscription, other subscriptions are allowed to be created. + drop(sub); + // Ensure the `chainHead_unfollow` is propagated to the server. 
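+ // The unsubscription is processed asynchronously on the server, so give it a
+ // moment to release the reserved follow slot before subscribing again.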
+ tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + // Initialized must always be reported first. + let _event: FollowEvent<String> = get_next_event(&mut sub).await; +} diff --git a/substrate/client/rpc-spec-v2/src/common/connections.rs b/substrate/client/rpc-spec-v2/src/common/connections.rs new file mode 100644 index 0000000000000000000000000000000000000000..c16a80bf49db93853ca223ab632dbc544082ae44 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/common/connections.rs @@ -0,0 +1,262 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +use jsonrpsee::ConnectionId; +use parking_lot::Mutex; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +/// Connection state which keeps track of whether a connection exists and +/// of the number of concurrent operations. +#[derive(Default, Clone)] +pub struct RpcConnections { + /// The number of identifiers that can be registered for each connection. + /// + /// # Example + /// + /// This is used to limit how many `chainHead_follow` subscriptions are active at one time. + capacity: usize, + /// Maps the connection ID to a set of identifiers. + data: Arc<Mutex<HashMap<ConnectionId, ConnectionData>>>, +} + +#[derive(Default)] +struct ConnectionData { + /// The total number of identifiers for the given connection. + /// + /// An identifier for a connection might be: + /// - the subscription ID for chainHead_follow + /// - the operation ID for the transactionBroadcast API + /// - or simply how many times the transaction API has been called. + /// + /// # Note + /// + /// Because a pending subscription sink does not expose the future subscription ID, + /// we cannot register a subscription ID before the pending subscription is accepted. + /// This variable ensures that we have enough capacity to register an identifier, after + /// the subscription is accepted. Otherwise, a jsonrpc error object should be returned. + num_identifiers: usize, + /// Active registered identifiers for the given connection. + /// + /// # Note + /// + /// For chainHead, this represents the subscription ID. + /// For transactionBroadcast, this represents the operation ID. + /// For transaction, this is empty and the number of active calls is tracked by + /// [`Self::num_identifiers`]. + identifiers: HashSet<String>, +} + +impl RpcConnections { + /// Constructs a new instance of [`RpcConnections`]. + pub fn new(capacity: usize) -> Self { + RpcConnections { capacity, data: Default::default() } + } + + /// Reserve space for a new connection identifier. + /// + /// If the number of active identifiers for the given connection exceeds the capacity, + /// returns None.
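+ ///
+ /// A minimal usage sketch (mirroring the unit tests below; the identifier is
+ /// illustrative):
+ ///
+ /// ```ignore
+ /// let connections = RpcConnections::new(1);
+ /// // Reserve a slot for connection ID 1, then consume the reservation by
+ /// // registering a concrete identifier.
+ /// let reserved = connections.reserve_space(1).expect("capacity available");
+ /// let registered = reserved.register("sub-1".to_string()).expect("not registered yet");
+ /// assert!(connections.contains_identifier(1, "sub-1"));
+ /// // Dropping `registered` unregisters the identifier and frees the slot.
+ /// ```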
+ pub fn reserve_space(&self, connection_id: ConnectionId) -> Option<ReservedConnection> { + let mut data = self.data.lock(); + + let entry = data.entry(connection_id).or_insert_with(ConnectionData::default); + if entry.num_identifiers >= self.capacity { + return None; + } + entry.num_identifiers = entry.num_identifiers.saturating_add(1); + + Some(ReservedConnection { connection_id, rpc_connections: Some(self.clone()) }) + } + + /// Gives back the reserved space before the connection identifier is registered. + /// + /// # Note + /// + /// This may happen if the pending subscription cannot be accepted (unlikely). + fn unreserve_space(&self, connection_id: ConnectionId) { + let mut data = self.data.lock(); + + let entry = data.entry(connection_id).or_insert_with(ConnectionData::default); + entry.num_identifiers = entry.num_identifiers.saturating_sub(1); + + if entry.num_identifiers == 0 { + data.remove(&connection_id); + } + } + + /// Register an identifier for the given connection. + /// + /// Users must call [`Self::reserve_space`] before calling this method to ensure enough + /// space is available. + /// + /// Returns `true` if the identifier was inserted successfully, `false` if the identifier was + /// already inserted or the capacity was reached. + fn register_identifier(&self, connection_id: ConnectionId, identifier: String) -> bool { + let mut data = self.data.lock(); + + let entry = data.entry(connection_id).or_insert_with(ConnectionData::default); + // Capacity should have already been checked by `Self::reserve_space`. + if entry.identifiers.len() >= self.capacity { + return false; + } + + entry.identifiers.insert(identifier) + } + + /// Unregister an identifier for the given connection. + fn unregister_identifier(&self, connection_id: ConnectionId, identifier: &str) { + let mut data = self.data.lock(); + if let Some(connection_data) = data.get_mut(&connection_id) { + connection_data.identifiers.remove(identifier); + connection_data.num_identifiers = connection_data.num_identifiers.saturating_sub(1); + + if connection_data.num_identifiers == 0 { + data.remove(&connection_id); + } + } + } + + /// Check if the given connection contains the given identifier. + pub fn contains_identifier(&self, connection_id: ConnectionId, identifier: &str) -> bool { + let data = self.data.lock(); + data.get(&connection_id) + .map(|connection_data| connection_data.identifiers.contains(identifier)) + .unwrap_or(false) + } +} + +/// RAII wrapper that ensures the reserved space is given back if the object is +/// dropped before the identifier is registered. +pub struct ReservedConnection { + connection_id: ConnectionId, + rpc_connections: Option<RpcConnections>, +} + +impl ReservedConnection { + /// Register the identifier for the given connection. + pub fn register(mut self, identifier: String) -> Option<RegisteredConnection> { + let rpc_connections = self.rpc_connections.take()?; + + if rpc_connections.register_identifier(self.connection_id, identifier.clone()) { + Some(RegisteredConnection { + connection_id: self.connection_id, + identifier, + rpc_connections, + }) + } else { + None + } + } +} + +impl Drop for ReservedConnection { + fn drop(&mut self) { + if let Some(rpc_connections) = self.rpc_connections.take() { + rpc_connections.unreserve_space(self.connection_id); + } + } +} + +/// RAII wrapper that ensures the identifier is unregistered if the object is dropped.
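+///
+/// Dropping this handle also decrements the per-connection identifier count, so
+/// the freed capacity can be reserved again by a later call.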
+pub struct RegisteredConnection { + connection_id: ConnectionId, + identifier: String, + rpc_connections: RpcConnections, +} + +impl Drop for RegisteredConnection { + fn drop(&mut self) { + self.rpc_connections.unregister_identifier(self.connection_id, &self.identifier); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn reserve_space() { + let rpc_connections = RpcConnections::new(2); + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert_eq!(rpc_connections.data.lock().len(), 1); + + let reserved = reserved.unwrap(); + let registered = reserved.register("identifier1".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + drop(registered); + + // Data is dropped. + assert!(rpc_connections.data.lock().get(&1).is_none()); + assert!(rpc_connections.data.lock().is_empty()); + // Checks can still happen. + assert!(!rpc_connections.contains_identifier(1, "identifier1")); + } + + #[test] + fn reserve_space_capacity_reached() { + let rpc_connections = RpcConnections::new(2); + + // Reserve identifier for connection 1. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Add identifier for connection 1. + let reserved = reserved.unwrap(); + let registered = reserved.register("identifier1".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier1")); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Reserve identifier for connection 1 again. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Add identifier for connection 1 again. + let reserved = reserved.unwrap(); + let registered_second = reserved.register("identifier2".to_string()).unwrap(); + assert!(rpc_connections.contains_identifier(1, "identifier2")); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Cannot reserve more identifiers. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_none()); + + // Drop the first identifier. + drop(registered); + assert_eq!(1, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + assert!(rpc_connections.contains_identifier(1, "identifier2")); + assert!(!rpc_connections.contains_identifier(1, "identifier1")); + + // Can reserve again after clearing the space. + let reserved = rpc_connections.reserve_space(1); + assert!(reserved.is_some()); + assert_eq!(2, rpc_connections.data.lock().get(&1).unwrap().num_identifiers); + + // Ensure data is cleared. + drop(reserved); + drop(registered_second); + assert!(rpc_connections.data.lock().get(&1).is_none()); + } +} diff --git a/substrate/client/rpc-spec-v2/src/common/mod.rs b/substrate/client/rpc-spec-v2/src/common/mod.rs index ac1af8fce3c9bc42a00fa747f62289aef0e04356..3167561d649a2f51f24b864ffa581df16b139202 100644 --- a/substrate/client/rpc-spec-v2/src/common/mod.rs +++ b/substrate/client/rpc-spec-v2/src/common/mod.rs @@ -13,5 +13,6 @@ //! Common types and functionality for the RPC-V2 spec. 
+pub mod connections; pub mod events; pub mod storage; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index 690a1a64d7460c656f5180c59120c951dfb04472..77a28968aedf8cfeefc7964ca29b972a856d618e 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -372,7 +372,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { client_mock.trigger_import_stream(block_2_header).await; // Ensure we propagate the temporary ban error to `submit_and_watch`. - // This ensures we'll loop again with the next annmounced block and try to resubmit the + // This ensures we'll loop again with the next announced block and try to resubmit the // transaction. The transaction remains temporarily banned until the pool is maintained. let event = get_next_event!(&mut pool_middleware); assert_matches!(event, MiddlewarePoolEvent::PoolError { transaction, err } if transaction == xt && err.contains("Transaction temporarily Banned")); @@ -429,7 +429,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { } /// This is similar to `tx_broadcast_resubmits_invalid_tx`. -/// However, it forces the tx to be resubmited because of the pool +/// However, it forces the tx to be resubmitted because of the pool /// limits. Which is a different code path than the invalid tx. #[tokio::test] async fn tx_broadcast_resubmits_dropped_tx() { @@ -509,7 +509,7 @@ async fn tx_broadcast_resubmits_dropped_tx() { pool.inner_pool.maintain(event).await; client_mock.trigger_import_stream(block_3_header.clone()).await; - // The first tx is in a finalzied block; the future tx must enter the pool. + // The first tx is in a finalized block; the future tx must enter the pool. let events = get_next_tx_events!(&mut pool_middleware, 3); assert_eq!( events.get(¤t_xt).unwrap(), diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index 92c838261874a816335b6d7e82c7df7667e2c235..6eaf50d6b2e2822d766f4f131f4bd6f1ba04d5cd 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -44,7 +44,7 @@ pub struct TransactionBroadcast { pool: Arc, /// Executor to spawn subscriptions. executor: SubscriptionTaskExecutor, - /// The brodcast operation IDs. + /// The broadcast operation IDs. broadcast_ids: Arc>>, } @@ -200,7 +200,7 @@ where } } -/// Returns the last element of the providided stream, or `None` if the stream is closed. +/// Returns the last element of the provided stream, or `None` if the stream is closed. 
async fn last_stream_element(stream: &mut S) -> Option where S: Stream + Unpin, diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index f65e6c9a59ec1c6c593d00c7639ca4bef26e4319..dff34215b025c26fa3e6f4fa2949dcd0d2e4203a 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["server"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" @@ -40,10 +40,10 @@ sp-runtime = { path = "../../primitives/runtime" } sp-session = { path = "../../primitives/session" } sp-version = { path = "../../primitives/version" } sp-statement-store = { path = "../../primitives/statement-store" } -tokio = "1.22.0" +tokio = "1.37" [dev-dependencies] -env_logger = "0.9" +env_logger = "0.11" assert_matches = "1.3.0" sc-block-builder = { path = "../block-builder" } sc-network = { path = "../network" } @@ -51,7 +51,7 @@ sc-network-common = { path = "../network/common" } sc-transaction-pool = { path = "../transaction-pool" } sp-consensus = { path = "../../primitives/consensus/common" } sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } -tokio = "1.22.0" +tokio = "1.37" sp-io = { path = "../../primitives/io" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } pretty_assertions = "1.2.1" diff --git a/substrate/client/rpc/src/statement/mod.rs b/substrate/client/rpc/src/statement/mod.rs index b4f432bbbb0e3485a67447fb34669bf980602868..e99135aec38c6fb61dc667535b6fe9e0f458f6e3 100644 --- a/substrate/client/rpc/src/statement/mod.rs +++ b/substrate/client/rpc/src/statement/mod.rs @@ -89,7 +89,7 @@ impl StatementApiServer for StatementStore { fn submit(&self, encoded: Bytes) -> RpcResult<()> { let statement = Decode::decode(&mut &*encoded) - .map_err(|e| Error::StatementStore(format!("Eror decoding statement: {:?}", e)))?; + .map_err(|e| Error::StatementStore(format!("Error decoding statement: {:?}", e)))?; match self.store.submit(statement, StatementSource::Local) { SubmitResult::New(_) | SubmitResult::Known => Ok(()), // `KnownExpired` should not happen. 
Expired statements submitted with diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index bbf67d1fbd0af6dceada3b73178004558044d0b2..b81f2e2f55ab3ffd69d9e48e2a8a8f13440a6f3f 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -30,7 +30,7 @@ runtime-benchmarks = [ [dependencies] jsonrpsee = { version = "0.22", features = ["server"] } thiserror = { workspace = true } -futures = "0.3.21" +futures = "0.3.30" rand = "0.8.5" parking_lot = "0.12.1" log = { workspace = true, default-features = true } @@ -79,7 +79,7 @@ sc-tracing = { path = "../tracing" } sc-sysinfo = { path = "../sysinfo" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } -async-trait = "0.1.74" +async-trait = "0.1.79" tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "time"] } tempfile = "3.1.0" directories = "5.0.1" diff --git a/substrate/client/service/src/chain_ops/import_blocks.rs b/substrate/client/service/src/chain_ops/import_blocks.rs index 34f7669d0106e2065c3b08f7816a002c9d1175ae..661fc09a8f19e55c38bec280b7ce399a8dbe2e29 100644 --- a/substrate/client/service/src/chain_ops/import_blocks.rs +++ b/substrate/client/service/src/chain_ops/import_blocks.rs @@ -73,7 +73,7 @@ where reader: CodecIoReader, }, Json { - // Nubmer of blocks we have decoded thus far. + // Number of blocks we have decoded thus far. read_block_count: u64, // Stream to the data, used for decoding new blocks. reader: StreamDeserializer<'static, JsonIoRead, SignedBlock>, diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 35262ff493b44f07a94f1d35f1a3e765251a1897..59e307d7f93b529f37ab457dd6bfe0b312065444 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -236,7 +236,7 @@ impl Configuration { ProtocolId::from(protocol_id_full) } - /// Returns true if the genesis state writting will be skipped while initializing the genesis + /// Returns true if the genesis state writing will be skipped while initializing the genesis /// block. pub fn no_genesis(&self) -> bool { matches!(self.network.sync_mode, SyncMode::LightState { .. } | SyncMode::Warp { .. 
}) diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index ee7e60f6011701f9d12fdb521db8226c883dbfe4..2de984689badbacae254c7a596e996baa491dda5 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-channel = "1.8.0" array-bytes = "6.1" fdlimit = "0.3.0" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } parity-scale-codec = "3.6.1" parking_lot = "0.12.1" diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs index 41c231c31aafbc6453fa4343640c27b733174c28..6c37c87a611b1288c6aecc4b79b4a3e159e2b9dd 100644 --- a/substrate/client/state-db/src/lib.rs +++ b/substrate/client/state-db/src/lib.rs @@ -872,7 +872,7 @@ mod tests { fn check_stored_and_requested_mode_compatibility( mode_when_created: Option, mode_when_reopened: Option, - expected_effective_mode_when_reopenned: Result, + expected_effective_mode_when_reopened: Result, ) { let mut db = make_db(&[]); let (state_db_init, state_db) = @@ -883,7 +883,7 @@ mod tests { let state_db_reopen_result = StateDb::::open(db.clone(), mode_when_reopened, false, false); - if let Ok(expected_mode) = expected_effective_mode_when_reopenned { + if let Ok(expected_mode) = expected_effective_mode_when_reopened { let (state_db_init, state_db_reopened) = state_db_reopen_result.unwrap(); db.commit(&state_db_init); assert_eq!(state_db_reopened.pruning_mode(), expected_mode,) diff --git a/substrate/client/state-db/src/noncanonical.rs b/substrate/client/state-db/src/noncanonical.rs index bdbe8318371ce8239158f4f728a7313e107db879..4492a7bd077df02c6feb111051273742ef79604b 100644 --- a/substrate/client/state-db/src/noncanonical.rs +++ b/substrate/client/state-db/src/noncanonical.rs @@ -46,26 +46,26 @@ pub struct NonCanonicalOverlay { #[cfg_attr(test, derive(PartialEq, Debug))] struct OverlayLevel { blocks: Vec>, - used_indicies: u64, // Bitmask of available journal indicies. + used_indices: u64, // Bitmask of available journal indices. 
} impl OverlayLevel { fn push(&mut self, overlay: BlockOverlay) { - self.used_indicies |= 1 << overlay.journal_index; + self.used_indices |= 1 << overlay.journal_index; self.blocks.push(overlay) } fn available_index(&self) -> u64 { - self.used_indicies.trailing_ones() as u64 + self.used_indices.trailing_ones() as u64 } fn remove(&mut self, index: usize) -> BlockOverlay { - self.used_indicies &= !(1 << self.blocks[index].journal_index); + self.used_indices &= !(1 << self.blocks[index].journal_index); self.blocks.remove(index) } fn new() -> OverlayLevel { - OverlayLevel { blocks: Vec::new(), used_indicies: 0 } + OverlayLevel { blocks: Vec::new(), used_indices: 0 } } } diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index 676f6cb36f67992c86db1174cbbf96e02e53d1b1..8ca6d11dbe0dc786929925a90a40b5035f2607be 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -31,4 +31,4 @@ sc-keystore = { path = "../keystore" } [dev-dependencies] tempfile = "3.1.0" -env_logger = "0.9" +env_logger = "0.11" diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index ba58452ffb58bfd737235beb285e6e46c992967d..32b7755c64b5046fcd31cb6e0d9ff2c1061ed57a 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.19" +futures = "0.3.30" libc = "0.2" log = { workspace = true, default-features = true } rand = "0.8.5" diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index 8ab00202f0ba6828dbf210e44d6337393ba1d391..9a29a33a591f4e0491673b9ef3d7f86e0546220d 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = "0.4.31" -futures = "0.3.21" +futures = "0.3.30" libp2p = { version = "0.51.4", features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/client/telemetry/src/lib.rs b/substrate/client/telemetry/src/lib.rs index 113d8303a20f6fedcf8f3c75108896bbbb8f8756..7e3a4ee8639308bd4678288db583e31c5085af5b 100644 --- a/substrate/client/telemetry/src/lib.rs +++ b/substrate/client/telemetry/src/lib.rs @@ -413,7 +413,7 @@ impl Telemetry { .map_err(|_| Error::TelemetryWorkerDropped) } - /// Make a new cloneable handle to this [`Telemetry`]. This is used for reporting telemetries. + /// Make a new clonable handle to this [`Telemetry`]. This is used for reporting telemetries. 
pub fn handle(&self) -> TelemetryHandle { TelemetryHandle { message_sender: Arc::new(Mutex::new(self.message_sender.clone())), diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 2ca37afd61b84a7228852290d5075841c993a503..e2a0b87eaabb79383299036f92f5b3899b1b4e04 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/README.md b/substrate/client/transaction-pool/README.md index b55dc6482d64eb5342e684a82989f0ab2732b784..7a53727d576103800375a99d0544b3b67efad79e 100644 --- a/substrate/client/transaction-pool/README.md +++ b/substrate/client/transaction-pool/README.md @@ -171,7 +171,7 @@ This parameter instructs the pool propagate/gossip a transaction to node peers. By default this should be `true`, however in some cases it might be undesirable to propagate transactions further. Examples might include heavy transactions produced by block authors in offchain workers (DoS) or risking being front -runned by someone else after finding some non trivial solution or equivocation, +run by someone else after finding some non trivial solution or equivocation, etc. ### 'TransactionSource` diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index d52e4783fabce48992dec01e83c61bc7a3074a20..1bb72ef55442216e321c564838a93a75a44f62eb 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -12,9 +12,9 @@ description = "Transaction pool client facing API." workspace = true [dependencies] -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 461b9860d414a3495dc34bf4f149963fa4d0a903..49bd2203c12b48454844e1bc3ce09c2c446fc9b6 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -999,7 +999,7 @@ fn import_notification_to_pool_maintain_works() { .0, ); - // Prepare the extrisic, push it to the pool and check that it was added. + // Prepare the extrinsic, push it to the pool and check that it was added.
let xt = uxt(Alice, 0); block_on(pool.submit_one( pool.api().block_id_to_hash(&BlockId::Number(0)).unwrap().unwrap(), diff --git a/substrate/client/utils/Cargo.toml b/substrate/client/utils/Cargo.toml index 7f604219bc09a96ebb8114df01e10f7d474a87f4..a101f4b3f3ad009a35c1cc061e4c9515b98e1419 100644 --- a/substrate/client/utils/Cargo.toml +++ b/substrate/client/utils/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] async-channel = "1.8.0" -futures = "0.3.21" +futures = "0.3.30" futures-timer = "3.0.2" lazy_static = "1.4.0" log = { workspace = true, default-features = true } diff --git a/substrate/client/utils/src/mpsc.rs b/substrate/client/utils/src/mpsc.rs index c24a5bd8904afc99109acf8f5c6fe5872b5d9874..91db7e1e7b011142f5b1089eb74a209a612cf4b9 100644 --- a/substrate/client/utils/src/mpsc.rs +++ b/substrate/client/utils/src/mpsc.rs @@ -86,7 +86,7 @@ pub fn tracing_unbounded( warning_fired: Arc::new(AtomicBool::new(false)), creation_backtrace: Arc::new(Backtrace::force_capture()), }; - let receiver = TracingUnboundedReceiver { inner: r, name }; + let receiver = TracingUnboundedReceiver { inner: r, name: name.into() }; (sender, receiver) } @@ -157,6 +157,11 @@ impl TracingUnboundedReceiver { pub fn len(&self) -> usize { self.inner.len() } + + /// The name of this receiver + pub fn name(&self) -> &'static str { + self.name + } } impl Drop for TracingUnboundedReceiver { diff --git a/substrate/client/utils/src/notification.rs b/substrate/client/utils/src/notification.rs index dabb85d613cc97e1a95133a5de130b505f5050e4..3f606aabe3a156071ad0fbfffc8c5c8fd55728e2 100644 --- a/substrate/client/utils/src/notification.rs +++ b/substrate/client/utils/src/notification.rs @@ -44,7 +44,7 @@ mod tests; /// and identify the mpsc channels. pub trait TracingKeyStr { /// Const `str` representing the "tracing key" used to tag and identify - /// the mpsc channels owned by the object implemeting this trait. + /// the mpsc channels owned by the object implementing this trait. 
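+ ///
+ /// For example (illustrative value only):
+ ///
+ /// ```ignore
+ /// const TRACING_KEY: &'static str = "mpsc_my_notification_stream";
+ /// ```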
const TRACING_KEY: &'static str; } diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 6746723e72f79498cf764199866936b93373858e..27001ee5afd9eed4c09f48e2683663685c0a8183 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -22,7 +22,7 @@ targets = ["x86_64-unknown-linux-gnu"] parity-scale-codec = { version = "3.2.2", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.6.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } @@ -51,7 +51,7 @@ sp-inherents = { default-features = false, path = "../primitives/inherents", opt frame-executive = { default-features = false, path = "../frame/executive", optional = true } frame-system-rpc-runtime-api = { default-features = false, path = "../frame/system/rpc/runtime-api", optional = true } -docify = "0.2.7" +docify = "0.2.8" log = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index bc873ad69c803a9f686d5329cffde11c76e8dc35..af1fcb296f6726983c654238dbc2d2161e4eec1a 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = { version = "6.1", optional = true } log = { workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } diff --git a/substrate/frame/alliance/README.md b/substrate/frame/alliance/README.md index 9930008e2d63558a9f83d906da92ecbf29f11a7a..16335a98f59f67a06b9ed6f18fe2c41e43124a34 100644 --- a/substrate/frame/alliance/README.md +++ b/substrate/frame/alliance/README.md @@ -58,7 +58,7 @@ to update the Alliance's rule and make announcements. - `add_unscrupulous_items` - Add some items, either accounts or websites, to the list of unscrupulous items. - `remove_unscrupulous_items` - Remove some items from the list of unscrupulous items. -- `abdicate_fellow_status` - Abdicate one's voting rights, demoting themself to Ally. +- `abdicate_fellow_status` - Abdicate one's voting rights, demoting themselves to Ally. 
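The alliance pallet diffs that follow remove the `#[pallet::getter]` attributes, so reads move from generated getters such as `Alliance::rule()` to direct storage access. A minimal sketch of the before/after pattern (the instancing generics follow the usual FRAME convention and are assumed here, since the extraction above mangled them):

    // Before: the attribute generates a getter, e.g. `Alliance::rule()`.
    #[pallet::storage]
    #[pallet::getter(fn rule)]
    pub type Rule<T: Config<I>, I: 'static = ()> = StorageValue<_, Cid, OptionQuery>;

    // After: no getter; call sites read the storage item directly, as the
    // updated tests do with `alliance::Rule::<Test>::get()`.
    #[pallet::storage]
    pub type Rule<T: Config<I>, I: 'static = ()> = StorageValue<_, Cid, OptionQuery>;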
#### Root Calls diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index 4ccd0fc08f6ac1c8ac7e6009b555a470eb4a2779..710c32a848dd152b4ae4cf02c1be87e295ae7da4 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -505,8 +505,8 @@ mod benchmarks { assert_last_event::( Event::MembersInitialized { fellows: fellows.clone(), allies: allies.clone() }.into(), ); - assert_eq!(Alliance::::members(MemberRole::Fellow), fellows); - assert_eq!(Alliance::::members(MemberRole::Ally), allies); + assert_eq!(Members::::get(MemberRole::Fellow), fellows); + assert_eq!(Members::::get(MemberRole::Ally), allies); Ok(()) } @@ -563,7 +563,7 @@ mod benchmarks { { call.dispatch_bypass_filter(origin)?; } - assert_eq!(Alliance::::rule(), Some(rule.clone())); + assert_eq!(Rule::::get(), Some(rule.clone())); assert_last_event::(Event::NewRuleSet { rule }.into()); Ok(()) } @@ -583,7 +583,7 @@ mod benchmarks { call.dispatch_bypass_filter(origin)?; } - assert!(Alliance::::announcements().contains(&announcement)); + assert!(Announcements::::get().contains(&announcement)); assert_last_event::(Event::Announced { announcement }.into()); Ok(()) } @@ -606,7 +606,7 @@ mod benchmarks { call.dispatch_bypass_filter(origin)?; } - assert!(!Alliance::::announcements().contains(&announcement)); + assert!(!Announcements::::get().contains(&announcement)); assert_last_event::(Event::AnnouncementRemoved { announcement }.into()); Ok(()) } diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index d4703db68dbba34988516ba08b5efcebcb79e5e9..1f06241e9c83b088137547ec47372aa8e84142d7 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -442,24 +442,20 @@ pub mod pallet { /// The IPFS CID of the alliance rule. /// Fellows can propose a new rule with a super-majority. #[pallet::storage] - #[pallet::getter(fn rule)] pub type Rule, I: 'static = ()> = StorageValue<_, Cid, OptionQuery>; /// The current IPFS CIDs of any announcements. #[pallet::storage] - #[pallet::getter(fn announcements)] pub type Announcements, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// Maps members to their candidacy deposit. #[pallet::storage] - #[pallet::getter(fn deposit_of)] pub type DepositOf, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::AccountId, BalanceOf, OptionQuery>; /// Maps member type to members of each type. #[pallet::storage] - #[pallet::getter(fn members)] pub type Members, I: 'static = ()> = StorageMap< _, Twox64Concat, @@ -471,20 +467,17 @@ pub mod pallet { /// A set of members who gave a retirement notice. They can retire after the end of retirement /// period stored as a future block number. #[pallet::storage] - #[pallet::getter(fn retiring_members)] pub type RetiringMembers, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::AccountId, BlockNumberFor, OptionQuery>; /// The current list of accounts deemed unscrupulous. These accounts non grata cannot submit /// candidacy. #[pallet::storage] - #[pallet::getter(fn unscrupulous_accounts)] pub type UnscrupulousAccounts, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// The current list of websites deemed unscrupulous. 
#[pallet::storage] - #[pallet::getter(fn unscrupulous_websites)] pub type UnscrupulousWebsites, I: 'static = ()> = StorageValue<_, BoundedVec, T::MaxUnscrupulousItems>, ValueQuery>; @@ -505,10 +498,10 @@ pub mod pallet { proposal: Box<>::Proposal>, #[pallet::compact] length_bound: u32, ) -> DispatchResult { - let proposor = ensure_signed(origin)?; - ensure!(Self::has_voting_rights(&proposor), Error::::NoVotingRights); + let proposer = ensure_signed(origin)?; + ensure!(Self::has_voting_rights(&proposer), Error::::NoVotingRights); - T::ProposalProvider::propose_proposal(proposor, threshold, proposal, length_bound)?; + T::ProposalProvider::propose_proposal(proposer, threshold, proposal, length_bound)?; Ok(()) } diff --git a/substrate/frame/alliance/src/migration.rs b/substrate/frame/alliance/src/migration.rs index 432f09a16f47772ead741a0f312eefeb795e2d69..b4ecc1819447dc68f6ede10611ba88b97a677b68 100644 --- a/substrate/frame/alliance/src/migration.rs +++ b/substrate/frame/alliance/src/migration.rs @@ -162,18 +162,18 @@ pub(crate) mod v1_to_v2 { #[cfg(test)] mod test { use super::*; - use crate::{mock::*, MemberRole}; + use crate::{mock::*, MemberRole, Members}; #[test] fn migration_v1_to_v2_works() { new_test_ext().execute_with(|| { assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(Members::::get(MemberRole::Ally), vec![4]); + assert_eq!(Members::::get(MemberRole::Fellow), vec![1, 2, 3]); v1_to_v2::migrate::(); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3, 4]); - assert_eq!(Alliance::members(MemberRole::Ally), vec![]); - assert_eq!(Alliance::members(MemberRole::Retiring), vec![]); + assert_eq!(Members::::get(MemberRole::Fellow), vec![1, 2, 3, 4]); + assert_eq!(Members::::get(MemberRole::Ally), vec![]); + assert_eq!(Members::::get(MemberRole::Retiring), vec![]); }); } } diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index 710de5a54bc95a607b51f4078db94d7c24c92272..edb515b8115a57eeac4fdade7ca4831e43ff17f9 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -21,12 +21,12 @@ use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use frame_system::{EventRecord, Phase}; use super::*; -use crate::mock::*; +use crate::{self as alliance, mock::*}; type AllianceMotionEvent = pallet_collective::Event; fn assert_powerless(user: RuntimeOrigin, user_is_member: bool) { - //vote / veto with a valid propsal + //vote / veto with a valid proposal let cid = test_cid(); let (proposal, _, _) = make_kick_member_proposal(42); @@ -118,7 +118,7 @@ fn disband_works() { // join alliance and reserve funds assert_eq!(Balances::free_balance(9), 1000 - id_deposit); assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(9))); - assert_eq!(Alliance::deposit_of(9), Some(expected_join_deposit)); + assert_eq!(alliance::DepositOf::::get(9), Some(expected_join_deposit)); assert_eq!(Balances::free_balance(9), 1000 - id_deposit - expected_join_deposit); assert!(Alliance::is_member_of(&9, MemberRole::Ally)); @@ -314,7 +314,7 @@ fn set_rule_works() { new_test_ext().execute_with(|| { let cid = test_cid(); assert_ok!(Alliance::set_rule(RuntimeOrigin::signed(1), cid.clone())); - assert_eq!(Alliance::rule(), Some(cid.clone())); + assert_eq!(alliance::Rule::::get(), Some(cid.clone())); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::NewRuleSet { rule: 
cid, @@ -330,7 +330,7 @@ fn announce_works() { assert_noop!(Alliance::announce(RuntimeOrigin::signed(2), cid.clone()), BadOrigin); assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![cid.clone()]); + assert_eq!(alliance::Announcements::::get(), vec![cid.clone()]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid, @@ -343,7 +343,7 @@ fn remove_announcement_works() { new_test_ext().execute_with(|| { let cid = test_cid(); assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![cid.clone()]); + assert_eq!(alliance::Announcements::::get(), vec![cid.clone()]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid.clone(), })); @@ -351,7 +351,7 @@ fn remove_announcement_works() { System::set_block_number(2); assert_ok!(Alliance::remove_announcement(RuntimeOrigin::signed(3), cid.clone())); - assert_eq!(Alliance::announcements(), vec![]); + assert_eq!(alliance::Announcements::::get(), vec![]); System::assert_last_event(mock::RuntimeEvent::Alliance( crate::Event::AnnouncementRemoved { announcement: cid }, )); @@ -394,8 +394,8 @@ fn join_alliance_works() { // success to submit assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); assert_eq!(Balances::free_balance(4), 1000 - id_deposit - join_deposit); - assert_eq!(Alliance::deposit_of(4), Some(25)); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); + assert_eq!(alliance::DepositOf::::get(4), Some(25)); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); // check already member assert_noop!( @@ -449,8 +449,8 @@ fn nominate_ally_works() { // success to nominate assert_ok!(Alliance::nominate_ally(RuntimeOrigin::signed(1), 4)); - assert_eq!(Alliance::deposit_of(4), None); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); + assert_eq!(alliance::DepositOf::::get(4), None); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); // check already member assert_noop!( @@ -482,12 +482,12 @@ fn elevate_ally_works() { ); assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); - assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Ally), vec![4]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::elevate_ally(RuntimeOrigin::signed(2), 4)); - assert_eq!(Alliance::members(MemberRole::Ally), Vec::::new()); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3, 4]); + assert_eq!(alliance::Members::::get(MemberRole::Ally), Vec::::new()); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3, 4]); }); } @@ -499,10 +499,10 @@ fn give_retirement_notice_work() { Error::::NotMember ); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2]); - assert_eq!(Alliance::members(MemberRole::Retiring), vec![3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2]); + assert_eq!(alliance::Members::::get(MemberRole::Retiring), vec![3]); System::assert_last_event(mock::RuntimeEvent::Alliance( crate::Event::MemberRetirementPeriodStarted { member: (3) }, )); @@ -527,7 +527,7 @@ fn 
retire_works() { Error::::RetirementNoticeNotGiven ); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); assert_noop!( Alliance::retire(RuntimeOrigin::signed(3)), @@ -535,7 +535,7 @@ fn retire_works() { ); System::set_block_number(System::block_number() + RetirementPeriod::get()); assert_ok!(Alliance::retire(RuntimeOrigin::signed(3))); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberRetired { member: (3), unreserved: None, @@ -551,7 +551,7 @@ fn retire_works() { #[test] fn abdicate_works() { new_test_ext().execute_with(|| { - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::abdicate_fellow_status(RuntimeOrigin::signed(3))); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::FellowAbdicated { @@ -573,9 +573,9 @@ fn kick_member_works() { ); >::insert(2, 25); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 2, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 2, 3]); assert_ok!(Alliance::kick_member(RuntimeOrigin::signed(2), 2)); - assert_eq!(Alliance::members(MemberRole::Fellow), vec![1, 3]); + assert_eq!(alliance::Members::::get(MemberRole::Fellow), vec![1, 3]); assert_eq!(>::get(2), None); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberKicked { member: (2), @@ -596,8 +596,11 @@ fn add_unscrupulous_items_works() { UnscrupulousItem::Website("abc".as_bytes().to_vec().try_into().unwrap()) ] )); - assert_eq!(Alliance::unscrupulous_accounts().into_inner(), vec![3]); - assert_eq!(Alliance::unscrupulous_websites().into_inner(), vec!["abc".as_bytes().to_vec()]); + assert_eq!(alliance::UnscrupulousAccounts::::get().into_inner(), vec![3]); + assert_eq!( + alliance::UnscrupulousWebsites::::get().into_inner(), + vec!["abc".as_bytes().to_vec()] + ); assert_noop!( Alliance::add_unscrupulous_items( @@ -629,12 +632,12 @@ fn remove_unscrupulous_items_works() { RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(3)] )); - assert_eq!(Alliance::unscrupulous_accounts(), vec![3]); + assert_eq!(alliance::UnscrupulousAccounts::::get(), vec![3]); assert_ok!(Alliance::remove_unscrupulous_items( RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(3)] )); - assert_eq!(Alliance::unscrupulous_accounts(), Vec::::new()); + assert_eq!(alliance::UnscrupulousAccounts::::get(), Vec::::new()); }); } diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index 1f2db14dac2dbe39dc16ddc206af6eb18fc4ca45..1a8e8eea4849ad76f5185ccb6a76745db07d4513 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = 
"../../primitives/api", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs index c9725f9d39d68f8fd8fbb8257a802975f9b9589b..0bf73e8809cfb3cb305716b8561846ff9a1ed631 100644 --- a/substrate/frame/asset-conversion/src/lib.rs +++ b/substrate/frame/asset-conversion/src/lib.rs @@ -205,7 +205,7 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A successful call of the `CretaPool` extrinsic will create this event. + /// A successful call of the `CreatePool` extrinsic will create this event. PoolCreated { /// The account that created the pool. creator: T::AccountId, diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 6e7bbf29fc43219dbe084afeb1254a0783510746..cd502148a8d8f116afcbcde8969490a454da81b7 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/asset-rate/src/lib.rs b/substrate/frame/asset-rate/src/lib.rs index befabfe54aa0e766d498eccf2201de4e9c0f2fc9..69f8267a4f25fe2fa82113eb6d9673b05b4ab6aa 100644 --- a/substrate/frame/asset-rate/src/lib.rs +++ b/substrate/frame/asset-rate/src/lib.rs @@ -112,7 +112,7 @@ pub mod pallet { /// The origin permissioned to remove an existing conversion rate for an asset. type RemoveOrigin: EnsureOrigin; - /// The origin permissioned to update an existiing conversion rate for an asset. + /// The origin permissioned to update an existing conversion rate for an asset. type UpdateOrigin: EnsureOrigin; /// The currency mechanism for this pallet. diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index 2efc96348cb5447b792c334256076a395f646719..3b95750c14c8c9c78ad86442085580810e447f5a 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false } # Needed for various traits. In our case, `OnFinalize`. sp-runtime = { path = "../../primitives/runtime", default-features = false } diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 583c75b38276e797782e3159a7463deff08d9746..e5fe2a3d1fd3965b31563480e45eb99fecfd05b7 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -17,7 +17,16 @@ //! # Assets Pallet //! -//! A simple, secure module for dealing with fungible assets. +//! 
A simple, secure module for dealing with sets of assets implementing +//! [`fungible`](frame_support::traits::fungible) traits, via +//! [`fungibles`](frame_support::traits::fungibles) traits. +//! +//! The pallet makes heavy use of concepts such as Holds and Freezes from the +//! [`frame_support::traits::fungible`] traits; therefore, you should read and understand those docs +//! as a prerequisite to understanding this pallet. +//! +//! See the [`frame_tokens`] reference docs for more information about the place of the +//! Assets pallet in FRAME. //! //! ## Overview //! @@ -133,6 +142,8 @@ //! //! * [`System`](../frame_system/index.html) //! * [`Support`](../frame_support/index.html) +//! +//! [`frame_tokens`]: ../polkadot_sdk_docs/reference_docs/frame_tokens/index.html // This recursion limit is needed because we have too many benchmarks and benchmarking will fail if // we add more without this limit. @@ -183,7 +194,7 @@ pub use weights::WeightInfo; type AccountIdLookupOf<T> = <<T as SystemConfig>::Lookup as StaticLookup>::Source; const LOG_TARGET: &str = "runtime::assets"; -/// Trait with callbacks that are executed after successfull asset creation or destruction. +/// Trait with callbacks that are executed after successful asset creation or destruction. pub trait AssetsCallback<AssetId, AccountId> { /// Indicates that asset with `id` was successfully created by the `owner` fn created(_id: &AssetId, _owner: &AccountId) -> Result<(), ()> { diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index e09648a51eccd09408b4549cc28315c8052d9ae5..c7021bcad531046c8881a8a9ef8932f4bd54c011 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -1453,7 +1453,7 @@ fn force_asset_status_should_work() { )); assert_eq!(Assets::balance(0, 1), 50); - // account can recieve assets for balance < min_balance + // account can receive assets for balance < min_balance assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 1)); assert_eq!(Assets::balance(0, 1), 51); diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index 0283af1d1b06234479cb9f0ad3fe142ba386ccf1..c641071df902d0fa78f4794292bacd824f9712bb 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index de698487efa7fa6c1cc37cc89e872b9603a9f253..97349107f1f4362b3aa6c7949f82ab3b647dfe42 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false }
frame-system = { path = "../system", default-features = false } pallet-timestamp = { path = "../timestamp", default-features = false } diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index 997f51ba428fa095584eb147990941a2838c69e0..3ca1444aaae9b61511a33ed6a9dc9075a2e8c864 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -114,6 +114,7 @@ pub mod pallet { /// The effective value of this type should not change while the chain is running. /// /// For backwards compatibility either use [`MinimumPeriodTimesTwo`] or a const. + #[pallet::constant] type SlotDuration: Get<<Self as pallet_timestamp::Config>::Moment>; } diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index 0922007e57e82df51d89500078b02164942b9f45..a7aba711a5686088241e4507e686683114641bf7 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-session = { path = "../session", default-features = false, features = [ diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml index 4b318f12519eacfbacbff9987b1490064b692b66..2bfd59a48e10e0cee249d62680e66d7ce426cc3f 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index fc7385efa1f14c371af682304f3e1c8d3c614c2d..9f6ef2bc05ea18e96786bd59b697c0290fd2d398 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/babe/src/benchmarking.rs b/substrate/frame/babe/src/benchmarking.rs index 92f55665913e24b8f254cf0d1610bc87c737a6e4..6b0e31e84718f4311421eec18755d562c7b24db9 100644 --- a/substrate/frame/babe/src/benchmarking.rs +++ b/substrate/frame/babe/src/benchmarking.rs @@ -31,7 +31,7 @@ benchmarks!
{ // NOTE: generated with the test below `test_generate_equivocation_report_blob`. // the output is not deterministic since keys are generated randomly (and therefore // signature content changes). it should not affect the benchmark. - // with the current benchmark setup it is not possible to generate this programatically + // with the current benchmark setup it is not possible to generate this programmatically // from the benchmark setup. const EQUIVOCATION_PROOF_BLOB: [u8; 416] = [ 222, 241, 46, 66, 243, 228, 135, 233, 177, 64, 149, 170, 141, 92, 193, 106, 51, 73, 31, diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index f9ae462e16d729f9652da114f5ebcef4cc2f21b7..5deb504d0a4f2846748bd0fb4970cfbfdde41733 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } @@ -34,7 +34,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau # third party log = { workspace = true } -docify = "0.2.7" +docify = "0.2.8" aquamarine = { version = "0.5.0" } # Optional imports for benchmarking diff --git a/substrate/frame/bags-list/src/list/tests.rs b/substrate/frame/bags-list/src/list/tests.rs index fd4ad8f893af38aa29fe0573eb4c22d13be97037..cd39b083172670f0f8f7450ffc546b6fda187313 100644 --- a/substrate/frame/bags-list/src/list/tests.rs +++ b/substrate/frame/bags-list/src/list/tests.rs @@ -431,7 +431,7 @@ mod list { #[test] fn insert_at_unchecked_at_is_only_node() { // Note that this `insert_at_unchecked` test should fail post checks because node 42 does - // not get re-assigned the correct bagu pper. This is because `insert_at_unchecked` assumes + // not get re-assigned the correct bag upper. This is because `insert_at_unchecked` assumes // both nodes are already in the same bag with the correct bag upper. 
ExtBuilder::default().build_and_execute_no_post_check(|| { // given diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 64ae90c67575fc1dcd139b7ed6dcbaa1c3ffda59..28eabdaf5062b4c7250bb5a03dc64debb2f37a8c 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -18,13 +18,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } -docify = "0.2.6" +docify = "0.2.8" [dev-dependencies] pallet-transaction-payment = { path = "../transaction-payment" } diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs index 1ac882ade70dfb92df8ae5bbe3897fa7799ae53f..d5fe9934e239e9e4c513e9e021686eec3ca757c3 100644 --- a/substrate/frame/balances/src/impl_currency.rs +++ b/substrate/frame/balances/src/impl_currency.rs @@ -28,8 +28,8 @@ use frame_support::{ tokens::{fungible, BalanceStatus as Status, Fortitude::Polite, Precision::BestEffort}, Currency, DefensiveSaturating, ExistenceRequirement, ExistenceRequirement::AllowDeath, - Get, Imbalance, LockIdentifier, LockableCurrency, NamedReservableCurrency, - ReservableCurrency, SignedImbalance, TryDrop, WithdrawReasons, + Get, Imbalance, InspectLockableCurrency, LockIdentifier, LockableCurrency, + NamedReservableCurrency, ReservableCurrency, SignedImbalance, TryDrop, WithdrawReasons, }, }; use frame_system::pallet_prelude::BlockNumberFor; @@ -918,3 +918,12 @@ where Self::update_locks(who, &locks[..]); } } + +impl<T: Config<I>, I: 'static> InspectLockableCurrency<T::AccountId> for Pallet<T, I> { + fn balance_locked(id: LockIdentifier, who: &T::AccountId) -> Self::Balance { + Self::locks(who) + .into_iter() + .filter(|l| l.id == id) + .fold(Zero::zero(), |acc, l| acc + l.amount) + } +} diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 3fd3102cd3522fbfae1c3770a67e1970ffc3b742..685b12499ac0ffb2a27d3f861232ab3dc09bf08f 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -17,11 +17,15 @@ //! # Balances Pallet //! -//! The Balances pallet provides functionality for handling accounts and balances. +//! The Balances pallet provides functionality for handling accounts and balances for a single +//! token. //! -//! - [`Config`] -//! - [`Call`] -//! - [`Pallet`] +//! It makes heavy use of concepts such as Holds and Freezes from the +//! [`frame_support::traits::fungible`] traits; therefore, you should read and understand those docs +//! as a prerequisite to understanding this pallet. +//! +//! Also see the [`frame_tokens`] reference docs for higher level information regarding the +//! place of this pallet in FRAME. //! //! ## Overview //! @@ -38,42 +42,30 @@ //! //! ### Terminology //! -//! - **Existential Deposit:** The minimum balance required to create or keep an account open. This -//!
prevents "dust accounts" from filling storage. When the free plus the reserved balance (i.e. -//! the total balance) fall below this, then the account is said to be dead; and it loses its -//! functionality as well as any prior history and all information on it is removed from the -//! chain's state. No account should ever have a total balance that is strictly between 0 and the -//! existential deposit (exclusive). If this ever happens, it indicates either a bug in this -//! pallet or an erroneous raw mutation of storage. -//! -//! - **Total Issuance:** The total number of units in existence in a system. -//! //! - **Reaping an account:** The act of removing an account by resetting its nonce. Happens after -//! its total balance has become zero (or, strictly speaking, less than the Existential Deposit). -//! -//! - **Free Balance:** The portion of a balance that is not reserved. The free balance is the only -//! balance that matters for most operations. +//! its total balance has become less than the Existential Deposit. //! -//! - **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. -//! Reserved balance can still be slashed, but only after all the free balance has been slashed. -//! -//! - **Imbalance:** A condition when some funds were credited or debited without equal and opposite -//! accounting (i.e. a difference between total issuance and account balances). Functions that -//! result in an imbalance will return an object of the `Imbalance` trait that can be managed within -//! your runtime logic. (If an imbalance is simply dropped, it should automatically maintain any -//! book-keeping such as total issuance.) +//! ### Implementations //! -//! - **Lock:** A freeze on a specified amount of an account's free balance until a specified block -//! number. Multiple locks always operate over the same funds, so they "overlay" rather than -//! "stack". +//! The Balances pallet provides implementations for the following [`fungible`] traits. If these +//! traits provide the functionality that you need, then you should avoid tight coupling with the +//! Balances pallet. //! -//! ### Implementations +//! - [`fungible::Inspect`] +//! - [`fungible::Mutate`] +//! - [`fungible::Unbalanced`] +//! - [`fungible::Balanced`] +//! - [`fungible::BalancedHold`] +//! - [`fungible::InspectHold`] +//! - [`fungible::MutateHold`] +//! - [`fungible::InspectFreeze`] +//! - [`fungible::MutateFreeze`] +//! - [`fungible::Imbalance`] //! -//! The Balances pallet provides implementations for the following traits. If these traits provide -//! the functionality that you need, then you can avoid coupling with the Balances pallet. +//! It also implements the following [`Currency`] related traits, however they are deprecated and +//! will eventually be removed. //! -//! - [`Currency`]: Functions for dealing with a -//! fungible assets system. +//! - [`Currency`]: Functions for dealing with a fungible assets system. //! - [`ReservableCurrency`] //! - [`NamedReservableCurrency`](frame_support::traits::NamedReservableCurrency): //! Functions for dealing with assets that can be reserved from an account. @@ -83,14 +75,6 @@ //! imbalances between total issuance in the system and account balances. Must be used when a //! function creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). //! -//! ## Interface -//! -//! ### Dispatchable Functions -//! -//! - `transfer_allow_death` - Transfer some liquid free balance to another account. -//! 
- `force_set_balance` - Set the balances of a given account. The origin of this call must be -//! root. -//! //! ## Usage //! //! The following examples show how to use the Balances pallet in your custom pallet. @@ -151,8 +135,11 @@ //! * Total issued balance of all accounts should be less than `Config::Balance::max_value()`. //! * Existential Deposit is set to a value greater than zero. //! -//! Note, you may find the Balances pallet still functions with an ED of zero in some circumstances, -//! however this is not a configuration which is generally supported, nor will it be. +//! Note, you may find the Balances pallet still functions with an ED of zero when the +//! `insecure_zero_ed` cargo feature is enabled. However, this is not a configuration which is +//! generally supported, nor will it be. +//! +//! [`frame_tokens`]: ../polkadot_sdk_docs/reference_docs/frame_tokens/index.html #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; @@ -308,10 +295,14 @@ pub mod pallet { /// The maximum number of locks that should exist on an account. /// Not strictly enforced, but used for weight estimation. + /// + /// Use of locks is deprecated in favour of freezes. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::constant] type MaxLocks: Get<u32>; /// The maximum number of named reserves that can exist on an account. + /// + /// Use of reserves is deprecated in favour of holds. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::constant] type MaxReserves: Get<u32>; @@ -455,6 +446,8 @@ pub mod pallet { /// Any liquidity locks on some account balances. /// NOTE: Should only be accessed when setting, changing and freeing a lock. + /// + /// Use of locks is deprecated in favour of freezes. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::storage] #[pallet::getter(fn locks)] pub type Locks<T: Config<I>, I: 'static = ()> = StorageMap< @@ -466,6 +459,8 @@ >; /// Named reserves on some account balances. + /// + /// Use of reserves is deprecated in favour of holds. See `https://github.com/paritytech/substrate/pull/12951/` #[pallet::storage] #[pallet::getter(fn reserves)] pub type Reserves<T: Config<I>, I: 'static = ()> = StorageMap< @@ -677,7 +672,7 @@ pub mod pallet { /// /// This will waive the transaction fee if at least all but 10% of the accounts needed to /// be upgraded. (We let some not have to be upgraded just in order to allow for the - /// possibililty of churn). + /// possibility of churn). #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::upgrade_accounts(who.len() as u32))] pub fn upgrade_accounts( @@ -905,14 +900,14 @@ Self::try_mutate_account(who, |a, _| -> Result<R, DispatchError> { Ok(f(a)) }) } - /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disnabled. + /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disabled. /// Returns `false` otherwise. #[cfg(not(feature = "insecure_zero_ed"))] fn have_providers_or_no_zero_ed(_: &T::AccountId) -> bool { true } - /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disnabled. + /// Returns `true` when `who` has some providers or `insecure_zero_ed` feature is disabled. /// Returns `false` otherwise.
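For illustration, here is a minimal sketch (not part of the diff) of what the deprecation notes above point to: moving a `LockableCurrency::set_lock` call over to the `fungible::MutateFreeze` API. The helper name is hypothetical, and it assumes a runtime where `pallet_balances` is configured with a `FreezeIdentifier`:

```rust
use frame_support::traits::fungible::MutateFreeze;
use sp_runtime::DispatchResult;

// Hypothetical helper: a freeze stands in for a lock here, with `id` playing
// the role the `LockIdentifier` used to play.
fn freeze_instead_of_lock<T: pallet_balances::Config>(
	id: &T::FreezeIdentifier,
	who: &T::AccountId,
	amount: T::Balance,
) -> DispatchResult {
	// Like `set_lock`, `set_freeze` overwrites any previous freeze registered
	// under the same identifier instead of stacking a new one.
	<pallet_balances::Pallet<T> as MutateFreeze<T::AccountId>>::set_freeze(id, who, amount)
}
```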
#[cfg(feature = "insecure_zero_ed")] fn have_providers_or_no_zero_ed(who: &T::AccountId) -> bool { diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 46a4c4caefc3d72ad45adf58243f3430344d4813..450b1a84aa878552cdd8e9bd38918099b7edb463 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -24,8 +24,8 @@ use frame_support::{ BalanceStatus::{Free, Reserved}, Currency, ExistenceRequirement::{self, AllowDeath, KeepAlive}, - Hooks, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, - WithdrawReasons, + Hooks, InspectLockableCurrency, LockIdentifier, LockableCurrency, NamedReservableCurrency, + ReservableCurrency, WithdrawReasons, }, StorageNoopGuard, }; @@ -88,6 +88,24 @@ fn basic_locking_should_work() { }); } +#[test] +fn inspect_lock_should_work() { + ExtBuilder::default() + .existential_deposit(1) + .monied(true) + .build_and_execute_with(|| { + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::all()); + Balances::set_lock(ID_2, &1, 10, WithdrawReasons::all()); + Balances::set_lock(ID_1, &2, 20, WithdrawReasons::all()); + + assert_eq!(>::balance_locked(ID_1, &1), 10); + assert_eq!(>::balance_locked(ID_2, &1), 10); + assert_eq!(>::balance_locked(ID_1, &2), 20); + assert_eq!(>::balance_locked(ID_2, &2), 0); + assert_eq!(>::balance_locked(ID_1, &3), 0); + }) +} + #[test] fn account_should_be_reaped() { ExtBuilder::default() @@ -1024,7 +1042,7 @@ fn slash_consumed_slash_partial_works() { } #[test] -fn slash_on_non_existant_works() { +fn slash_on_non_existent_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { // Slash on non-existent account is okay. assert_eq!(Balances::slash(&12345, 1_300), (NegativeImbalance::new(0), 1300)); @@ -1071,7 +1089,7 @@ fn slash_reserved_overslash_does_not_touch_free_balance() { } #[test] -fn slash_reserved_on_non_existant_works() { +fn slash_reserved_on_non_existent_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { // Slash on non-existent account is okay. assert_eq!(Balances::slash_reserved(&12345, 1_300), (NegativeImbalance::new(0), 1300)); diff --git a/substrate/frame/balances/src/tests/reentrancy_tests.rs b/substrate/frame/balances/src/tests/reentrancy_tests.rs index 1afbe82c7e2a7b2c9eec65e0ad2f34a23aa025a3..717f04978577f1d82f1c743cbac988a6820f90bc 100644 --- a/substrate/frame/balances/src/tests/reentrancy_tests.rs +++ b/substrate/frame/balances/src/tests/reentrancy_tests.rs @@ -44,7 +44,7 @@ fn transfer_dust_removal_tst1_should_work() { assert_eq!(Balances::free_balance(&2), 0); // As expected beneficiary account 3 - // received the transfered fund. + // received the transferred fund. assert_eq!(Balances::free_balance(&3), 450); // Dust balance is deposited to account 1 @@ -123,7 +123,7 @@ fn repatriating_reserved_balance_dust_removal_should_work() { // Reserve a value on account 2, // Such that free balance is lower than - // Exestintial deposit. + // Existential deposit. 
assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(2), 1, 450)); // Since free balance of account 2 is lower than diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index 17707731773118b7028f3cff8031982c0c5d704b..8fcb8e1d559bea2c5082e49cc0002d15745639c0 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -15,7 +15,7 @@ workspace = true array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index e38eaa6fb0787aca48b07fad57e0ea0adffbe7e4..f181f4d41cdcb2567654d21b0fefb88b507d6f69 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs index 87304eba8bab27a022d946c0e7d403ec9346dac4..09cd13ab70a4fc1089c794124fd8d7f6f24b7482 100644 --- a/substrate/frame/beefy/src/lib.rs +++ b/substrate/frame/beefy/src/lib.rs @@ -280,6 +280,14 @@ pub mod pallet { } } + + #[pallet::hooks] + impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + } + #[pallet::validate_unsigned] impl<T: Config> ValidateUnsigned for Pallet<T> { type Call = Call<T>; @@ -294,6 +302,60 @@ pub mod pallet { } } +#[cfg(any(feature = "try-runtime", test))] +impl<T: Config> Pallet<T> { + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + Self::try_state_authorities()?; + Self::try_state_validators()?; + + Ok(()) + } + + /// # Invariants + /// + /// * `Authorities` should not exceed the `MaxAuthorities` capacity. + /// * `NextAuthorities` should not exceed the `MaxAuthorities` capacity. + fn try_state_authorities() -> Result<(), sp_runtime::TryRuntimeError> { + if let Some(authorities_len) = <Authorities<T>>::decode_len() { + ensure!( + authorities_len as u32 <= T::MaxAuthorities::get(), + "Authorities number exceeds what the pallet config allows."
+ ); + } else { + return Err(sp_runtime::TryRuntimeError::Other( + "Failed to decode length of authorities", + )); + } + + if let Some(next_authorities_len) = <NextAuthorities<T>>::decode_len() { + ensure!( + next_authorities_len as u32 <= T::MaxAuthorities::get(), + "Next authorities number exceeds what the pallet config allows." + ); + } else { + return Err(sp_runtime::TryRuntimeError::Other( + "Failed to decode length of next authorities", + )); + } + Ok(()) + } + + /// # Invariants + /// + /// `ValidatorSetId` must be present in `SetIdSession` + fn try_state_validators() -> Result<(), sp_runtime::TryRuntimeError> { + let validator_set_id = <ValidatorSetId<T>>::get(); + ensure!( + SetIdSession::<T>::get(validator_set_id).is_some(), + "Validator set id must be present in SetIdSession" + ); + Ok(()) + } +} + impl<T: Config> Pallet<T> { /// Return the current active BEEFY validator set. pub fn validator_set() -> Option<ValidatorSet<T::BeefyId>> { diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index fccc63bd1b45a9aded2cfcfa7e38222174aa2ad8..1c55adc8de4b7d87535367e1f934c9db171b4902 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -27,7 +27,6 @@ use frame_support::{ }; use pallet_session::historical as pallet_session_historical; use sp_core::{crypto::KeyTypeId, ConstU128}; -use sp_io::TestExternalities; use sp_runtime::{ app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, traits::OpaqueKeys, BuildStorage, Perbill, @@ -210,6 +209,73 @@ impl pallet_offences::Config for Test { type OnOffenceHandler = Staking; } +#[derive(Default)] +pub struct ExtBuilder { + authorities: Vec<BeefyId>, +} + +impl ExtBuilder { + /// Add some AccountIds to insert into `List`. + #[cfg(test)] + pub(crate) fn add_authorities(mut self, ids: Vec<BeefyId>) -> Self { + self.authorities = ids; + self + } + + pub fn build(self) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap(); + + let balances: Vec<_> = + (0..self.authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); + + pallet_balances::GenesisConfig::<Test> { balances } + .assimilate_storage(&mut t) + .unwrap(); + + let session_keys: Vec<_> = self + .authorities + .iter() + .enumerate() + .map(|(i, k)| (i as u64, i as u64, MockSessionKeys { dummy: k.clone() })) + .collect(); + + BasicExternalities::execute_with_storage(&mut t, || { + for (ref id, ..) in &session_keys { + frame_system::Pallet::<Test>::inc_providers(id); + } + }); + + pallet_session::GenesisConfig::<Test> { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + + // controllers are same as stash + let stakers: Vec<_> = (0..self.authorities.len()) + .map(|i| (i as u64, i as u64, 10_000, pallet_staking::StakerStatus::<u64>::Validator)) + .collect(); + + let staking_config = pallet_staking::GenesisConfig::<Test> { + stakers, + validator_count: 2, + force_era: pallet_staking::Forcing::ForceNew, + minimum_validator_count: 0, + invulnerables: vec![], + ..Default::default() + }; + + staking_config.assimilate_storage(&mut t).unwrap(); + + t.into() + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + Beefy::do_try_state().expect("All invariants must hold after a test"); + }) + } +} + // Note, that we can't use `UintAuthorityId` here. Reason is that the implementation // of `to_public_key()` assumes, that a public key is 32 bytes long. This is true for // ed25519 and sr25519 but *not* for ecdsa.
A compressed ecdsa public key is 33 bytes, @@ -226,54 +292,6 @@ pub fn mock_authorities(vec: Vec<u8>) -> Vec<BeefyId> { vec.into_iter().map(|id| mock_beefy_id(id)).collect() } -pub fn new_test_ext(ids: Vec<u8>) -> TestExternalities { - new_test_ext_raw_authorities(mock_authorities(ids)) -} - -pub fn new_test_ext_raw_authorities(authorities: Vec<BeefyId>) -> TestExternalities { - let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap(); - - let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); - - pallet_balances::GenesisConfig::<Test> { balances } - .assimilate_storage(&mut t) - .unwrap(); - - let session_keys: Vec<_> = authorities - .iter() - .enumerate() - .map(|(i, k)| (i as u64, i as u64, MockSessionKeys { dummy: k.clone() })) - .collect(); - - BasicExternalities::execute_with_storage(&mut t, || { - for (ref id, ..) in &session_keys { - frame_system::Pallet::<Test>::inc_providers(id); - } - }); - - pallet_session::GenesisConfig::<Test> { keys: session_keys } - .assimilate_storage(&mut t) - .unwrap(); - - // controllers are same as stash - let stakers: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, i as u64, 10_000, pallet_staking::StakerStatus::<u64>::Validator)) - .collect(); - - let staking_config = pallet_staking::GenesisConfig::<Test> { - stakers, - validator_count: 2, - force_era: pallet_staking::Forcing::ForceNew, - minimum_validator_count: 0, - invulnerables: vec![], - ..Default::default() - }; - - staking_config.assimilate_storage(&mut t).unwrap(); - - t.into() -} - pub fn start_session(session_index: SessionIndex) { for i in Session::current_index()..session_index { System::on_finalize(System::block_number()); diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index 2950264e0c317f59cb2638a1851660865fe2e71a..6a6aa245ce1f9051f7e310510c6110ce34f11bc9 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -47,7 +47,7 @@ fn genesis_session_initializes_authorities() { let authorities = mock_authorities(vec![1, 2, 3, 4]); let want = authorities.clone(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { let authorities = beefy::Authorities::<Test>::get(); assert_eq!(authorities.len(), 4); @@ -69,130 +69,140 @@ fn session_change_updates_authorities() { let authorities = mock_authorities(vec![1, 2, 3, 4]); let want_validators = authorities.clone(); - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - assert!(0 == beefy::ValidatorSetId::<Test>::get()); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + assert!(0 == beefy::ValidatorSetId::<Test>::get()); - init_block(1); + init_block(1); - assert!(1 == beefy::ValidatorSetId::<Test>::get()); + assert!(1 == beefy::ValidatorSetId::<Test>::get()); - let want = beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new(want_validators, 1).unwrap(), - )); + let want = beefy_log(ConsensusLog::AuthoritiesChange( + ValidatorSet::new(want_validators, 1).unwrap(), + )); - let log = System::digest().logs[0].clone(); - assert_eq!(want, log); + let log = System::digest().logs[0].clone(); + assert_eq!(want, log); - init_block(2); + init_block(2); - assert!(2 == beefy::ValidatorSetId::<Test>::get()); + assert!(2 == beefy::ValidatorSetId::<Test>::get()); - let want = beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new(vec![mock_beefy_id(2), mock_beefy_id(4)], 2).unwrap(), - )); + let want = beefy_log(ConsensusLog::AuthoritiesChange( +
ValidatorSet::new(vec![mock_beefy_id(2), mock_beefy_id(4)], 2).unwrap(), + )); - let log = System::digest().logs[1].clone(); - assert_eq!(want, log); - }); + let log = System::digest().logs[1].clone(); + assert_eq!(want, log); + }); } #[test] fn session_change_updates_next_authorities() { let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - let next_authorities = beefy::NextAuthorities::<Test>::get(); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + let next_authorities = beefy::NextAuthorities::<Test>::get(); - assert_eq!(next_authorities.len(), 4); - assert_eq!(want[0], next_authorities[0]); - assert_eq!(want[1], next_authorities[1]); - assert_eq!(want[2], next_authorities[2]); - assert_eq!(want[3], next_authorities[3]); + assert_eq!(next_authorities.len(), 4); + assert_eq!(want[0], next_authorities[0]); + assert_eq!(want[1], next_authorities[1]); + assert_eq!(want[2], next_authorities[2]); + assert_eq!(want[3], next_authorities[3]); - init_block(1); + init_block(1); - let next_authorities = beefy::NextAuthorities::<Test>::get(); + let next_authorities = beefy::NextAuthorities::<Test>::get(); - assert_eq!(next_authorities.len(), 2); - assert_eq!(want[1], next_authorities[0]); - assert_eq!(want[3], next_authorities[1]); + assert_eq!(next_authorities.len(), 2); + assert_eq!(want[1], next_authorities[0]); + assert_eq!(want[3], next_authorities[1]); - }); + }); } #[test] fn validator_set_at_genesis() { let want = vec![mock_beefy_id(1), mock_beefy_id(2)]; - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - let vs = Beefy::validator_set().unwrap(); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + let vs = Beefy::validator_set().unwrap(); - assert_eq!(vs.id(), 0u64); - assert_eq!(vs.validators()[0], want[0]); - assert_eq!(vs.validators()[1], want[1]); - }); + assert_eq!(vs.id(), 0u64); + assert_eq!(vs.validators()[0], want[0]); + assert_eq!(vs.validators()[1], want[1]); + }); } #[test] fn validator_set_updates_work() { let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - let vs = Beefy::validator_set().unwrap(); - assert_eq!(vs.id(), 0u64); - assert_eq!(want[0], vs.validators()[0]); - assert_eq!(want[1], vs.validators()[1]); - assert_eq!(want[2], vs.validators()[2]); - assert_eq!(want[3], vs.validators()[3]); + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + let vs = Beefy::validator_set().unwrap(); + assert_eq!(vs.id(), 0u64); + assert_eq!(want[0], vs.validators()[0]); + assert_eq!(want[1], vs.validators()[1]); + assert_eq!(want[2], vs.validators()[2]); + assert_eq!(want[3], vs.validators()[3]); - init_block(1); + init_block(1); - let vs = Beefy::validator_set().unwrap(); + let vs = Beefy::validator_set().unwrap(); - assert_eq!(vs.id(), 1u64); - assert_eq!(want[0], vs.validators()[0]); - assert_eq!(want[1], vs.validators()[1]); + assert_eq!(vs.id(), 1u64); + assert_eq!(want[0], vs.validators()[0]); + assert_eq!(want[1], vs.validators()[1]); - init_block(2); + init_block(2); - let vs = Beefy::validator_set().unwrap(); + let vs = Beefy::validator_set().unwrap(); - assert_eq!(vs.id(), 2u64); - assert_eq!(want[1], vs.validators()[0]); - assert_eq!(want[3], vs.validators()[1]); + assert_eq!(vs.id(), 2u64); + assert_eq!(want[1], vs.validators()[0]); + assert_eq!(want[3],
vs.validators()[1]); + }); } #[test] fn cleans_up_old_set_id_session_mappings() { - new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { - let max_set_id_session_entries = MaxSetIdSessionEntries::get(); - - // we have 3 sessions per era - let era_limit = max_set_id_session_entries / 3; - // sanity check against division precision loss - assert_eq!(0, max_set_id_session_entries % 3); - // go through `max_set_id_session_entries` sessions - start_era(era_limit); - - // we should have a session id mapping for all the set ids from - // `max_set_id_session_entries` eras we have observed - for i in 1..=max_set_id_session_entries { - assert!(beefy::SetIdSession::<Test>::get(i as u64).is_some()); - } + ExtBuilder::default() + .add_authorities(mock_authorities(vec![1, 2, 3, 4])) + .build_and_execute(|| { + let max_set_id_session_entries = MaxSetIdSessionEntries::get(); + + // we have 3 sessions per era + let era_limit = max_set_id_session_entries / 3; + // sanity check against division precision loss + assert_eq!(0, max_set_id_session_entries % 3); + // go through `max_set_id_session_entries` sessions + start_era(era_limit); + + // we should have a session id mapping for all the set ids from + // `max_set_id_session_entries` eras we have observed + for i in 1..=max_set_id_session_entries { + assert!(beefy::SetIdSession::<Test>::get(i as u64).is_some()); + } - // go through another `max_set_id_session_entries` sessions - start_era(era_limit * 2); + // go through another `max_set_id_session_entries` sessions + start_era(era_limit * 2); - // we should keep tracking the new mappings for new sessions - for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 { - assert!(beefy::SetIdSession::<Test>::get(i as u64).is_some()); - } + // we should keep tracking the new mappings for new sessions + for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 { + assert!(beefy::SetIdSession::<Test>::get(i as u64).is_some()); + } - // but the old ones should have been pruned by now - for i in 1..=max_set_id_session_entries { - assert!(beefy::SetIdSession::<Test>::get(i as u64).is_none()); - } - }); + // but the old ones should have been pruned by now + for i in 1..=max_set_id_session_entries { + assert!(beefy::SetIdSession::<Test>::get(i as u64).is_none()); + } + }); } /// Returns a list with 3 authorities with known keys: @@ -259,7 +269,7 @@ fn should_sign_and_verify() { fn report_equivocation_current_set_works() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { assert_eq!(Staking::current_era(), Some(0)); assert_eq!(Session::current_index(), 0); @@ -339,7 +349,7 @@ fn report_equivocation_current_set_works() { fn report_equivocation_old_set_works() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -422,7 +432,7 @@ fn report_equivocation_old_set_works() { fn report_equivocation_invalid_set_id() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -460,7 +470,7 @@ fn report_equivocation_invalid_session() { let authorities = test_authorities(); -
new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -503,7 +513,7 @@ fn report_equivocation_invalid_session() { fn report_equivocation_invalid_key_owner_proof() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -551,7 +561,7 @@ fn report_equivocation_invalid_key_owner_proof() { fn report_equivocation_invalid_equivocation_proof() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -624,7 +634,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -728,7 +738,7 @@ fn report_equivocation_has_valid_weight() { fn valid_equivocation_reports_dont_pay_fees() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let block_num = System::block_number(); @@ -796,7 +806,7 @@ fn valid_equivocation_reports_dont_pay_fees() { fn set_new_genesis_works() { let authorities = test_authorities(); - new_test_ext_raw_authorities(authorities).execute_with(|| { + ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { start_era(1); let new_genesis_delay = 10u64; diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index bf42aae979cd0edd3a00ca98f8aacaba55d807dc..8210e8cfa626063a32b5528b5469f7029857299f 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = linregress = { version = "0.5.1", optional = true } log = { workspace = true } paste = "1.0" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-support-procedural = { path = "../support/procedural", default-features = false } diff --git a/substrate/frame/benchmarking/README.md b/substrate/frame/benchmarking/README.md index bf0bde2c3df5a5dcc85a6aa61f3c0af519bc3ccf..0b3680e1154664eda6050c75944af1391140bb61 100644 --- a/substrate/frame/benchmarking/README.md +++ b/substrate/frame/benchmarking/README.md @@ -177,8 +177,8 @@ requirements. The benchmarking CLI uses a Handlebars template to format the final output file. You can optionally pass the flag `--template` pointing to a custom template that can be used instead. Within the template, you have access to all the data provided by the `TemplateData` struct in the [benchmarking CLI -writer](../../utils/frame/benchmarking-cli/src/writer.rs). You can find the default template used -[here](../../utils/frame/benchmarking-cli/src/template.hbs). 
+writer](../../utils/frame/benchmarking-cli/src/pallet/writer.rs). You can find the default template used +[here](../../utils/frame/benchmarking-cli/src/pallet/template.hbs). There are some custom Handlebars helpers included with our output generation: diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index ce5daeb5b7b4c427e23eca59632f91507f571ae5..5d3aaa7890488018eb3715ed13845a7e3d130b50 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "..", default-features = false } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/benchmarking/src/analysis.rs b/substrate/frame/benchmarking/src/analysis.rs index 5fc3abb5a27f0f2c2bcaadc2c6e71fbd603c0470..987078ff79a8b432c1ca801ab79578cd2c27fc9d 100644 --- a/substrate/frame/benchmarking/src/analysis.rs +++ b/substrate/frame/benchmarking/src/analysis.rs @@ -41,7 +41,7 @@ pub enum BenchmarkSelector { /// Multiplies the value by 1000 and converts it into an u128. fn mul_1000_into_u128(value: f64) -> u128 { - // This is slighly more precise than the alternative of `(value * 1000.0) as u128`. + // This is slightly more precise than the alternative of `(value * 1000.0) as u128`. (value as u128) .saturating_mul(1000) .saturating_add((value.fract() * 1000.0) as u128) diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index f79582d03e51caaaf395048fa224484b026d32f9..d4ee0abbecce5be341881d73004c59bf71d9db97 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -203,7 +203,7 @@ pub use v1::*; /// ## Where Clause /// /// Some pallets require a where clause specifying constraints on their generics to make -/// writing benchmarks feasible. To accomodate this situation, you can provide such a where +/// writing benchmarks feasible. To accommodate this situation, you can provide such a where /// clause as the (only) argument to the `#[benchmarks]` or `#[instance_benchmarks]` attribute /// macros. Below is an example of this taken from the `message-queue` pallet. /// diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index 4ad8cc0edd46ccf554493aac274ca79c679eb642..b2449db3d67d0d336c765becbb8845b85a1a35d3 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -874,7 +874,7 @@ macro_rules! impl_bench_name_tests { $crate::BenchmarkError::Override(_) => { // This is still considered a success condition. $crate::__private::log::error!( - "WARNING: benchmark error overrided - {}", + "WARNING: benchmark error overridden - {}", stringify!($name), ); }, @@ -1704,7 +1704,7 @@ macro_rules! impl_test_function { $crate::BenchmarkError::Override(_) => { // This is still considered a success condition. 
$crate::__private::log::error!( - "WARNING: benchmark error overrided - {}", + "WARNING: benchmark error overridden - {}", $crate::__private::str::from_utf8(benchmark_name) .expect("benchmark name is always a valid string!"), ); @@ -1851,7 +1851,7 @@ macro_rules! add_benchmark { Err($crate::BenchmarkError::Override(mut result)) => { // Insert override warning as the first storage key. $crate::__private::log::error!( - "WARNING: benchmark error overrided - {}", + "WARNING: benchmark error overridden - {}", $crate::__private::str::from_utf8(benchmark) .expect("benchmark name is always a valid string!") ); diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index 191a38d20b2faf4a923bc074afd905c523c8a2c1..3307e47e9818934d629ff1989aff08ff03de9acf 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index 31f9a6b63178d42410c16a3354f43ec35fe00f3d..3b6bd2019cc1770247d27da74ab2e52603ff2cf9 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } bitvec = { version = "1.0.0", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 70f488e998cc8d2b06d26f2250ee8a585ab44add..98ac074ca91778cfd2538cc0e89f90ca79a62ad5 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -918,6 +918,23 @@ mod benches { Ok(()) } + #[benchmark] + fn swap_leases() -> Result<(), BenchmarkError> { + let admin_origin = + T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + // Add two leases in `Leases` + let n = (T::MaxLeasedCores::get() / 2) as usize; + let mut leases = vec![LeaseRecordItem { task: 1, until: 10u32.into() }; n]; + leases.extend(vec![LeaseRecordItem { task: 2, until: 20u32.into() }; n]); + Leases::<T>::put(BoundedVec::try_from(leases).unwrap()); + + #[extrinsic_call] + _(admin_origin as T::RuntimeOrigin, 1, 2); + + Ok(()) + } + // Implements a test for each benchmark. Execute with: // `cargo test -p pallet-broker --features runtime-benchmarks`.
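As a usage sketch (hypothetical test, not part of the diff): assuming the crate's existing `mock::new_test_ext`/`Test` mock and that a root origin satisfies `AdminOrigin`, the `swap_leases` extrinsic exercised by the benchmark above could be driven like this:

```rust
#[cfg(test)]
mod swap_leases_sketch {
	use crate::mock::{new_test_ext, Test};
	use frame_support::assert_ok;

	#[test]
	fn swap_leases_can_be_called_by_root() {
		new_test_ext().execute_with(|| {
			// Every lease record for task 1 becomes task 2 and vice versa.
			assert_ok!(crate::Pallet::<Test>::swap_leases(
				frame_system::RawOrigin::Root.into(),
				1,
				2,
			));
		});
	}
}
```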
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index f2451013251fba144270e4009bae8cb180653b2d..74cda9c4f4cb48cfe1aa8f819cd3094eb2ca6023 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -437,4 +437,22 @@ impl<T: Config> Pallet<T> { Self::deposit_event(Event::AllowedRenewalDropped { core, when }); Ok(()) } + + pub(crate) fn do_swap_leases(id: TaskId, other: TaskId) -> DispatchResult { + let mut id_leases_count = 0; + let mut other_leases_count = 0; + Leases::<T>::mutate(|leases| { + leases.iter_mut().for_each(|lease| { + if lease.task == id { + lease.task = other; + id_leases_count += 1; + } else if lease.task == other { + lease.task = id; + other_leases_count += 1; + } + }) + }); + + Ok(()) + } } diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index a669463aa02d97e40c4221107bf15d1d3d3525bb..b9b5e309ca91e9ba4ccc2cd6e4f83c6d12bd542e 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -683,7 +683,7 @@ pub mod pallet { /// - `origin`: Must be a Signed origin of the account which owns the Region `region_id`. /// - `region_id`: The Region which was assigned to the Pool. /// - `max_timeslices`: The maximum number of timeslices which should be processed. This may - /// effect the weight of the call but should be ideally made equivalant to the length of + /// affect the weight of the call but should ideally be made equivalent to the length of /// the Region `region_id`. If it is less than this, then further dispatches will be /// required with the `region_id` which makes up any remainders of the region to be /// collected. @@ -786,5 +786,13 @@ pub mod pallet { Self::do_notify_core_count(core_count)?; Ok(()) } + + #[pallet::call_index(99)] + #[pallet::weight(T::WeightInfo::swap_leases())] + pub fn swap_leases(origin: OriginFor<T>, id: TaskId, other: TaskId) -> DispatchResult { + T::AdminOrigin::ensure_origin_or_root(origin)?; + Self::do_swap_leases(id, other)?; + Ok(()) + } } } diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index 7e9f351723a5d312fcd52dcf57d845ccea34a05a..e8119d29ef5d38e25060514d624713a6eedd11a8 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -55,7 +55,7 @@ pub enum Finality { pub struct RegionId { /// The timeslice at which this Region begins. pub begin: Timeslice, - /// The index of the Polakdot Core on which this Region will be scheduled. + /// The index of the Polkadot Core on which this Region will be scheduled. pub core: CoreIndex, /// The regularity parts in which this Region will be scheduled. pub mask: CoreMask, @@ -198,7 +198,7 @@ pub struct PoolIoRecord { /// The total change of the portion of the pool supplied by purchased Bulk Coretime, measured /// in Core Mask Bits. pub private: SignedCoreMaskBitCount, - /// The total change of the portion of the pool supplied by the Polkaot System, measured in + /// The total change of the portion of the pool supplied by the Polkadot System, measured in /// Core Mask Bits.
pub system: SignedCoreMaskBitCount, } diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index a8f50eeee6e6ceaf284c1764a68411956753cfff..a8b9fb598b8b4b941ae27e1a18b4a5c61d3b2248 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_broker` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-pzhd7p6z-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -76,6 +76,7 @@ pub trait WeightInfo { fn request_revenue_info_at() -> Weight; fn notify_core_count() -> Weight; fn do_tick_base() -> Weight; + fn swap_leases() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. @@ -87,8 +88,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_865_000 picoseconds. + Weight::from_parts(3_061_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -97,8 +98,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_431_000 picoseconds. + Weight::from_parts(19_558_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -108,8 +109,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_724_000 picoseconds. + Weight::from_parts(18_688_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -119,8 +120,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 10_513_000 picoseconds. + Weight::from_parts(11_138_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -139,14 +140,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds.
- Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 50_864_000 picoseconds. + Weight::from_parts(54_000_280, 8499) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -162,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. - Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 43_630_000 picoseconds. + Weight::from_parts(44_622_000, 2120) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -185,10 +184,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 62_453_000 picoseconds. + Weight::from_parts(63_882_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -198,8 +197,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 17_237_000 picoseconds. + Weight::from_parts(17_757_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -209,21 +208,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. - Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_504_000 picoseconds. + Weight::from_parts(19_273_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_477_000 picoseconds. + Weight::from_parts(21_328_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -237,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_815_000 picoseconds. 
+ Weight::from_parts(32_700_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -256,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 38_313_000 picoseconds. + Weight::from_parts(38_985_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -272,10 +271,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_734_000 picoseconds. - Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 70_170_000 picoseconds. + Weight::from_parts(71_245_388, 6196) + // Standard Error: 54_382 + .saturating_add(Weight::from_parts(1_488_794, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -287,8 +286,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 43_414_000 picoseconds. + Weight::from_parts(44_475_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -300,8 +299,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_327_000 picoseconds. + Weight::from_parts(32_050_000, 3550) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -315,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 41_315_000 picoseconds. + Weight::from_parts(42_421_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +329,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 49_707_000 picoseconds. + Weight::from_parts(51_516_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,10 +342,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_207_000 picoseconds. 
+ Weight::from_parts(27_227_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -355,22 +354,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_670_000 picoseconds. + Weight::from_parts(5_170_450, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(37, 0).saturating_mul(n.into())) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_916_000 picoseconds. + Weight::from_parts(7_485_053, 1487) + // Standard Error: 23 + .saturating_add(Weight::from_parts(30, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -386,10 +385,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 50_987_000 picoseconds. + Weight::from_parts(52_303_000, 4437) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -408,10 +407,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 38_334_000 picoseconds. + Weight::from_parts(40_517_609, 8499) + // Standard Error: 90 + .saturating_add(Weight::from_parts(338, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -423,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_850_000 picoseconds. + Weight::from_parts(8_157_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -436,8 +435,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_313_000 picoseconds. 
+ Weight::from_parts(17_727_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -445,28 +444,46 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(196_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - T::DbWeight::get().reads_writes(1, 1) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_587_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_121_000 picoseconds. + Weight::from_parts(13_685_000, 4068) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `239` + // Estimated: `1526` + // Minimum execution time: 6_847_000 picoseconds. + Weight::from_parts(7_185_000, 1526) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -478,8 +495,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_865_000 picoseconds. + Weight::from_parts(3_061_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -488,8 +505,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_431_000 picoseconds. 
+ Weight::from_parts(19_558_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -499,8 +516,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_724_000 picoseconds. + Weight::from_parts(18_688_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -510,8 +527,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 10_513_000 picoseconds. + Weight::from_parts(11_138_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -530,14 +547,12 @@ impl WeightInfo for () { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 50_864_000 picoseconds. + Weight::from_parts(54_000_280, 8499) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -553,10 +568,10 @@ impl WeightInfo for () { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. - Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 43_630_000 picoseconds. + Weight::from_parts(44_622_000, 2120) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -576,10 +591,10 @@ impl WeightInfo for () { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 62_453_000 picoseconds. + Weight::from_parts(63_882_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,8 +604,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 17_237_000 picoseconds. + Weight::from_parts(17_757_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -600,21 +615,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. 
- Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_504_000 picoseconds. + Weight::from_parts(19_273_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_477_000 picoseconds. + Weight::from_parts(21_328_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -628,8 +643,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_815_000 picoseconds. + Weight::from_parts(32_700_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -647,8 +662,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 38_313_000 picoseconds. + Weight::from_parts(38_985_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -663,10 +678,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_734_000 picoseconds. - Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 70_170_000 picoseconds. + Weight::from_parts(71_245_388, 6196) + // Standard Error: 54_382 + .saturating_add(Weight::from_parts(1_488_794, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -678,8 +693,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 43_414_000 picoseconds. + Weight::from_parts(44_475_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -691,8 +706,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_327_000 picoseconds. + Weight::from_parts(32_050_000, 3550) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -706,8 +721,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. 
- Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 41_315_000 picoseconds. + Weight::from_parts(42_421_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -721,10 +736,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 49_707_000 picoseconds. + Weight::from_parts(51_516_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -734,10 +749,10 @@ impl WeightInfo for () { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_207_000 picoseconds. + Weight::from_parts(27_227_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -746,22 +761,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_670_000 picoseconds. + Weight::from_parts(5_170_450, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(37, 0).saturating_mul(n.into())) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_916_000 picoseconds. + Weight::from_parts(7_485_053, 1487) + // Standard Error: 23 + .saturating_add(Weight::from_parts(30, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -777,10 +792,10 @@ impl WeightInfo for () { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 50_987_000 picoseconds. 
+ Weight::from_parts(52_303_000, 4437) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -799,10 +814,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 38_334_000 picoseconds. + Weight::from_parts(40_517_609, 8499) + // Standard Error: 90 + .saturating_add(Weight::from_parts(338, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -814,8 +829,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_850_000 picoseconds. + Weight::from_parts(8_157_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -827,8 +842,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_313_000 picoseconds. + Weight::from_parts(17_727_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -836,28 +851,45 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(196_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - RocksDbWeight::get().reads(1) - .saturating_add(RocksDbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_587_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_121_000 picoseconds. 
+ Weight::from_parts(13_685_000, 4068) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + fn swap_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `239` + // Estimated: `1526` + // Minimum execution time: 6_847_000 picoseconds. + Weight::from_parts(7_185_000, 1526) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml index 589ca95a7516148531434fc641af40f0f36796bb..14a5e25e13da6a246e3935c90839a350bf773d2e 100644 --- a/substrate/frame/child-bounties/Cargo.toml +++ b/substrate/frame/child-bounties/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index e19e1496e7b5016819b1efaede408b19ae013c48..850390409abc9646afbdb058dd2392d9f35869aa 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/collective/README.md b/substrate/frame/collective/README.md index 444927e51da22898c893b668328521606ba8a9a4..e860edbd484bb872bd241e13cf80f4928a278307 100644 --- a/substrate/frame/collective/README.md +++ b/substrate/frame/collective/README.md @@ -9,7 +9,7 @@ calculations, but enforces this neither in `set_members` nor in `change_members_ A "prime" member may be set to help determine the default vote behavior based on chain config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then -abstentations will first follow the majority of the collective voting, and then the prime +abstentions will first follow the majority of the collective voting, and then the prime member. Voting happens through motions comprising a proposal (i.e. 
a dispatchable) plus a diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index 882e99a6d0051306a103630a893d9fb44b803072..d0009d02f68c2f0d26e5847221c85ede39e9e633 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -283,7 +283,7 @@ pub mod pallet { pub type Members, I: 'static = ()> = StorageValue<_, Vec, ValueQuery>; - /// The prime member that helps determine the default vote behavior in case of absentations. + /// The prime member that helps determine the default vote behavior in case of abstentions. #[pallet::storage] pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index 8a80dd167e3d2929e77255c33f63c6a61d8fc086..5240dc215ff7b19b774dc32e2a33092e137d6d28 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -995,7 +995,7 @@ fn motions_all_first_vote_free_works() { Collective::vote(RuntimeOrigin::signed(3), hash, 0, false); assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); - // Test close() Extrincis | Check DispatchResultWithPostInfo with Pay Info + // Test close() Extrinsics | Check DispatchResultWithPostInfo with Pay Info let proposal_weight = proposal.get_dispatch_info().weight; let close_rval: DispatchResultWithPostInfo = diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index be3bafcd23fa107b12005ebe45da874e2a55e16b..d963ac261d1940fc9fa4d88101df2ca35cf99c43 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -18,12 +18,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +paste = { version = "1.0", default-features = false } bitflags = "1.3" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = { version = "1", default-features = false, features = [ @@ -57,7 +58,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x [dev-dependencies] array-bytes = "6.1" assert_matches = "1" -env_logger = "0.9" +env_logger = "0.11" pretty_assertions = "1" wat = "1" pallet-contracts-fixtures = { path = "./fixtures" } diff --git a/substrate/frame/contracts/README.md b/substrate/frame/contracts/README.md index 6e817292e66c50d5766d5406cca6c053a25c1362..09dc770300ca8491af062bbde9e820deccdd1341 100644 --- a/substrate/frame/contracts/README.md +++ b/substrate/frame/contracts/README.md @@ -59,7 +59,7 @@ In general, a contract execution needs to be deterministic so that all nodes com it. To that end we disallow any instructions that could cause indeterminism. Most notable are any floating point arithmetic. That said, sometimes contracts are executed off-chain and hence are not subject to consensus. If code is only executed by a single node and implicitly trusted by other actors is such a case. Trusted execution environments -come to mind. To that end we allow the execution of indeterminstic code for off-chain usages with the following +come to mind. 
To that end we allow the execution of indeterministic code for off-chain usages with the following constraints: 1. No contract can ever be instantiated from an indeterministic code. The only way to execute the code is to use a diff --git a/substrate/frame/contracts/fixtures/contracts/multi_store.rs b/substrate/frame/contracts/fixtures/contracts/multi_store.rs index b83f3995a42b4905383f8629c04d25902e03374e..a78115f0148888d601893322fe04a4ae07fce269 100644 --- a/substrate/frame/contracts/fixtures/contracts/multi_store.rs +++ b/substrate/frame/contracts/fixtures/contracts/multi_store.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Does two stores to two seperate storage items +//! Does two stores to two separate storage items #![no_std] #![no_main] diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml index 0c4cd1356f4dfddb5c8b3e194131a1de89693e89..387c3ca39d049f7674541e24a36b300683326260 100644 --- a/substrate/frame/contracts/mock-network/Cargo.toml +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -30,7 +30,7 @@ pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } polkadot-primitives = { path = "../../../../polkadot/primitives" } polkadot-runtime-parachains = { path = "../../../../polkadot/runtime/parachains" } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../../../primitives/api", default-features = false } sp-core = { path = "../../../primitives/core", default-features = false } sp-io = { path = "../../../primitives/io", default-features = false } diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs index e2a8d3d1337bb6901d4f0b24d16198595accba1b..470304ed357ec5594a2a66f795f838bc8be7740b 100644 --- a/substrate/frame/contracts/mock-network/src/relay_chain.rs +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -225,6 +225,7 @@ impl pallet_message_queue::Config for Runtime { type HeapSize = MessageQueueHeapSize; type MaxStale = MessageQueueMaxStale; type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = (); type MessageProcessor = MessageProcessor; type QueueChangeHandler = (); type WeightInfo = (); diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs index d22221fe8ee051182471249182d54b02b7f47c2c..39aa9bebc0f5943627ae40a109dab357ae19dd07 100644 --- a/substrate/frame/contracts/mock-network/src/tests.rs +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -23,7 +23,6 @@ use crate::{ }; use codec::{Decode, Encode}; use frame_support::{ - assert_err, pallet_prelude::Weight, traits::{fungibles::Mutate, Currency}, }; @@ -102,7 +101,7 @@ fn test_xcm_execute() { 0, Weight::MAX, None, - VersionedXcm::V4(message).encode(), + VersionedXcm::V4(message).encode().encode(), DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, @@ -146,7 +145,7 @@ fn test_xcm_execute_incomplete() { 0, Weight::MAX, None, - VersionedXcm::V4(message).encode(), + VersionedXcm::V4(message).encode().encode(), DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, @@ -160,37 +159,6 @@ fn 
test_xcm_execute_incomplete() { }); } -#[test] -fn test_xcm_execute_filtered_call() { - MockNet::reset(); - - let contract_addr = instantiate_test_contract("xcm_execute"); - - ParaA::execute_with(|| { - // `remark` should be rejected, as it is not allowed by our CallFilter. - let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); - let message: Xcm = Xcm(vec![Transact { - origin_kind: OriginKind::Native, - require_weight_at_most: Weight::MAX, - call: call.encode().into(), - }]); - - let result = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - VersionedXcm::V4(message).encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); - - assert_err!(result.result, frame_system::Error::::CallFiltered); - }); -} - #[test] fn test_xcm_execute_reentrant_call() { MockNet::reset(); @@ -222,7 +190,7 @@ fn test_xcm_execute_reentrant_call() { 0, Weight::MAX, None, - VersionedXcm::V4(message).encode(), + VersionedXcm::V4(message).encode().encode(), DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, @@ -258,7 +226,7 @@ fn test_xcm_send() { 0, Weight::MAX, None, - (dest, message).encode(), + (dest, message.encode()).encode(), DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index 96ce11f8c4183a53d7e8e75fc0f6d826e861c54b..b97cf168e26d4562e4f4a3b7f07ad243174e7a98 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -164,7 +164,7 @@ impl From for WasmModule { // Grant access to linear memory. // Every contract module is required to have an imported memory. - // If no memory is specified in the passed ModuleDefenition, then + // If no memory is specified in the passed ModuleDefinition, then // default to (1, 1). let (init, max) = if let Some(memory) = &def.memory { (memory.min_pages, Some(memory.max_pages)) diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index ce2cf15d812c90de9cb1578c9c1cae39515fd8cf..9fb107537ba07945dbfaebdce48555dc1103864f 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -340,7 +340,7 @@ mod benchmarks { assert_eq!(StorageVersion::get::>(), version); } - // This benchmarks the weight of dispatching migrate to execute 1 `NoopMigraton` + // This benchmarks the weight of dispatching migrate to execute 1 `NoopMigration` #[benchmark(pov_mode = Measured)] fn migrate() { let latest_version = LATEST_MIGRATION_VERSION; @@ -1859,7 +1859,7 @@ mod benchmarks { // We call unique accounts. // - // This is a slow call: We redeuce the number of runs. + // This is a slow call: We reduce the number of runs. #[benchmark(pov_mode = Measured)] fn seal_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let dummy_code = WasmModule::::dummy_with_bytes(0); @@ -1937,7 +1937,7 @@ mod benchmarks { Ok(()) } - // This is a slow call: We redeuce the number of runs. + // This is a slow call: We reduce the number of runs. #[benchmark(pov_mode = Measured)] fn seal_delegate_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { let hashes = (0..r) @@ -2473,7 +2473,7 @@ mod benchmarks { // Only calling the function itself for the list of // generated different ECDSA keys. 
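The `.encode().encode()` changes in the XCM tests above are easy to misread: the inner `encode` turns the `VersionedXcm` value into its SCALE bytes, and the outer `encode` re-wraps those bytes as a SCALE `Vec<u8>` argument, prepending a compact length prefix, which is evidently the shape the updated fixtures decode their input into (the same reasoning explains `(dest, message.encode()).encode()` in `test_xcm_send`). A small sketch of the double encoding using `parity-scale-codec`; the tuple is a stand-in for a real XCM message:

```rust
use parity_scale_codec::Encode;

fn main() {
    // Stand-in for `VersionedXcm::V4(message)`: any SCALE-encodable value.
    let message = (1u8, 42u32);

    let inner = message.encode(); // raw SCALE bytes of the message
    let wrapped = inner.encode(); // same bytes re-encoded as a `Vec<u8>`,
                                  // which prepends a compact length prefix
    assert_eq!(wrapped.len(), inner.len() + 1); // 1-byte prefix for short inputs
    println!("{inner:?} -> {wrapped:?}");
}
```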
- // This is a slow call: We redeuce the number of runs. + // This is a slow call: We reduce the number of runs. #[benchmark(pov_mode = Measured)] fn seal_ecdsa_to_eth_address( r: Linear<0, { API_BENCHMARK_RUNS / 10 }>, diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index d24657b65b204bd8f49a7dc1e3a000e4c8eb50b0..41a0383811fdb09afe7655d0418039c714cff1a5 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -531,9 +531,9 @@ enum FrameArgs<'a, T: Config, E> { nonce: u64, /// The executable whose `deploy` function is run. executable: E, - /// A salt used in the contract address deriviation of the new contract. + /// A salt used in the contract address derivation of the new contract. salt: &'a [u8], - /// The input data is used in the contract address deriviation of the new contract. + /// The input data is used in the contract address derivation of the new contract. input_data: &'a [u8], }, } diff --git a/substrate/frame/contracts/src/gas.rs b/substrate/frame/contracts/src/gas.rs index b9d91f38f16f957ae6bc40ef08010ecf146dbf03..32fad2140f1463b454a3b8df1cb49b53e79e0c7b 100644 --- a/substrate/frame/contracts/src/gas.rs +++ b/substrate/frame/contracts/src/gas.rs @@ -352,7 +352,7 @@ mod tests { assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Make sure that the gas meter does not charge in case of overcharger + // Make sure that the gas meter does not charge in case of overcharge #[test] fn overcharge_does_not_charge() { let mut gas_meter = GasMeter::::new(Weight::from_parts(200, 0)); diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index de84d5220c548a29cf046c85b8fea0520a5d3e91..e14a4b8bcb874015e58747867462c05280e3e52a 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -298,6 +298,9 @@ pub mod pallet { /// Therefore please make sure to be restrictive about which dispatchables are allowed /// in order to not introduce a new DoS vector like memory allocation patterns that can /// be exploited to drive the runtime into a panic. + /// + /// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact + /// calls, you must configure them separately within the XCM pallet itself. type CallFilter: Contains<::RuntimeCall>; /// Used to answer contracts' queries regarding the current weight price. This is **not** @@ -1104,7 +1107,7 @@ pub mod pallet { /// A more detailed error can be found on the node console if debug messages are enabled /// by supplying `-lruntime::contracts=debug`. CodeRejected, - /// An indetermistic code was used in a context where this is not permitted. + /// An indeterministic code was used in a context where this is not permitted. Indeterministic, /// A pending migration needs to complete before the extrinsic can be called. 
MigrationInProgress, diff --git a/substrate/frame/contracts/src/migration/v09.rs b/substrate/frame/contracts/src/migration/v09.rs index f19bff9d6747f8359558e2c2d5a289fad668bfaa..8e718871ecb1844849e18e6480ad1d6d0b7ba5f1 100644 --- a/substrate/frame/contracts/src/migration/v09.rs +++ b/substrate/frame/contracts/src/migration/v09.rs @@ -131,7 +131,7 @@ impl MigrationStep for Migration { let module = CodeStorage::::get(&code_hash).unwrap(); ensure!( module.instruction_weights_version == old.instruction_weights_version, - "invalid isntruction weights version" + "invalid instruction weights version" ); ensure!(module.determinism == Determinism::Enforced, "invalid determinism"); ensure!(module.initial == old.initial, "invalid initial"); diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs index b2e3801deaec1a36aef589427a6bf9dd324183b8..06a7c2005aa5e46e814d6eff192775ca63881fae 100644 --- a/substrate/frame/contracts/src/schedule.rs +++ b/substrate/frame/contracts/src/schedule.rs @@ -193,7 +193,7 @@ pub struct HostFnWeights { /// Weight of calling `seal_set_storage`. pub set_storage: Weight, - /// Weight per written byten of an item stored with `seal_set_storage`. + /// Weight per written byte of an item stored with `seal_set_storage`. pub set_storage_per_new_byte: Weight, /// Weight per overwritten byte of an item stored with `seal_set_storage`. diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index db6b2e80d07f01f6efa2e5702a73c80e7d498766..a51faa88c4143b55ee42bb394cc8316bb68b16f4 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
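The `set_storage_per_new_byte` doc fix a few hunks above concerns one of the linear components of `HostFnWeights`: each storage-writing host call is charged a flat base weight plus a per-byte term. A minimal sketch of that composition, using plain `u64` ref-time units to stay standalone (the numbers are illustrative, not benchmarked values):

```rust
// Base cost per `seal_set_storage` call plus a cost per newly written byte,
// mirroring the `set_storage` / `set_storage_per_new_byte` fields.
fn set_storage_charge(base: u64, per_new_byte: u64, value_len: u64) -> u64 {
    base.saturating_add(per_new_byte.saturating_mul(value_len))
}

fn main() {
    // e.g. a 10_000 ps base, 250 ps per byte, and a 100-byte value:
    assert_eq!(set_storage_charge(10_000, 250, 100), 35_000);
}
```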
+mod builder; mod pallet_dummy; mod test_debug; @@ -610,7 +611,7 @@ fn calling_plain_account_fails() { let base_cost = <::WeightInfo as WeightInfo>::call(); assert_eq!( - Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, Vec::new()), + builder::call(BOB).build(), Err(DispatchErrorWithPostInfo { error: Error::::ContractNotFound.into(), post_info: PostDispatchInfo { @@ -668,32 +669,13 @@ fn migration_in_progress_works() { Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB.clone(), code_hash), Error::::MigrationInProgress, ); + assert_err_ignore_postinfo!(builder::call(BOB).build(), Error::::MigrationInProgress); assert_err_ignore_postinfo!( - Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, vec![],), + builder::instantiate_with_code(wasm).value(100_000).build(), Error::::MigrationInProgress, ); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 100_000, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - ), - Error::::MigrationInProgress, - ); - assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 100_000, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).value(100_000).build(), Error::::MigrationInProgress, ); }); @@ -721,20 +703,9 @@ fn instantiate_and_call_and_deposit_event() { initialize_block(2); // Check at the end to get hash on error easily - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Existing(code_hash)) + .value(value) + .build_and_unwrap_account_id(); assert!(ContractInfoOf::::contains_key(&addr)); assert_eq!( @@ -812,41 +783,21 @@ fn deposit_event_max_value_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); // Call contract with allowed storage value. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer, - None, - ::Schedule::get().limits.payload_len.encode(), - )); + assert_ok!(builder::call(addr.clone()) + .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer, + .data(::Schedule::get().limits.payload_len.encode()) + .build()); // Call contract with too large a storage value. 
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - None, - (::Schedule::get().limits.payload_len + 1).encode(), - ), + builder::call(addr) + .data((::Schedule::get().limits.payload_len + 1).encode()) + .build(), Error::::ValueTooLarge, ); }); @@ -860,32 +811,16 @@ fn run_out_of_fuel_engine() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 100 * min_balance, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100 * min_balance) + .build_and_unwrap_account_id(); // Call the contract with a fixed gas limit. It must run out of gas because it just // loops forever. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, // newly created account - 0, - Weight::from_parts(1_000_000_000_000, u64::MAX), - None, - vec![], - ), + builder::call(addr) + .gas_limit(Weight::from_parts(1_000_000_000_000, u64::MAX)) + .build(), Error::::OutOfGas, ); }); @@ -899,36 +834,18 @@ fn run_out_of_fuel_host() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let gas_limit = Weight::from_parts(u32::MAX as u64, GAS_LIMIT.proof_size()); // Use chain extension to charge more ref_time than it is available. 
- let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - gas_limit, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result; + let result = builder::bare_call(addr.clone()) + .gas_limit(gas_limit) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() }.into()) + .build() + .result; assert_err!(result, >::OutOfGas); }); } @@ -938,63 +855,20 @@ fn gas_syncs_work() { let (code, _code_hash) = compile_module::("caller_is_origin_n").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_account_id(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - 0u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(0u32.encode()).build(); assert_ok!(result.result); let engine_consumed_noop = result.gas_consumed.ref_time(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - 1u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(1u32.encode()).build(); assert_ok!(result.result); let gas_consumed_once = result.gas_consumed.ref_time(); let host_consumed_once = ::Schedule::get().host_fn_weights.caller_is_origin.ref_time(); let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop; - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - 2u32.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr).data(2u32.encode()).build(); assert_ok!(result.result); let gas_consumed_twice = result.gas_consumed.ref_time(); let host_consumed_twice = host_consumed_once * 2; @@ -1018,56 +892,21 @@ fn instantiate_unique_trie_id() { .unwrap(); // Instantiate the contract and store its trie id for later comparison. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = + builder::bare_instantiate(Code::Existing(code_hash)).build_and_unwrap_account_id(); let trie_id = get_contract(&addr).trie_id; // Try to instantiate it again without termination should yield an error. assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).build(), >::DuplicateContract, ); // Terminate the contract. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Re-Instantiate after termination. 
- assert_ok!(Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - )); + assert_ok!(builder::instantiate(code_hash).build()); // Trie ids shouldn't match or we might have a collision assert_ne!(trie_id, get_contract(&addr).trie_id); @@ -1081,42 +920,22 @@ fn storage_max_value_limit() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); get_contract(&addr); // Call contract with allowed storage value. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer - None, - ::Schedule::get().limits.payload_len.encode(), - )); + assert_ok!(builder::call(addr.clone()) + .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer + .data(::Schedule::get().limits.payload_len.encode()) + .build()); // Call contract with too large a storage value. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - None, - (::Schedule::get().limits.payload_len + 1).encode(), - ), + builder::call(addr) + .data((::Schedule::get().limits.payload_len + 1).encode()) + .build(), Error::::ValueTooLarge, ); }); @@ -1132,20 +951,9 @@ fn deploy_and_call_other_contract() { // Create let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let caller_addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(100_000) + .build_and_unwrap_account_id(); Contracts::bare_upload_code(ALICE, callee_wasm, None, Determinism::Enforced).unwrap(); let callee_addr = Contracts::contract_address( @@ -1160,14 +968,9 @@ fn deploy_and_call_other_contract() { // Call BOB contract, which attempts to instantiate and call the callee contract and // makes various assertions on the results from those calls. 
- assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - caller_addr.clone(), - 0, - GAS_LIMIT, - None, - callee_code_hash.as_ref().to_vec(), - )); + assert_ok!(builder::call(caller_addr.clone()) + .data(callee_code_hash.as_ref().to_vec()) + .build()); assert_eq!( System::events(), @@ -1266,20 +1069,9 @@ fn delegate_call() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the 'caller' - let caller_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // Only upload 'callee' code assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -1288,14 +1080,10 @@ fn delegate_call() { Determinism::Enforced, )); - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - caller_addr.clone(), - 1337, - GAS_LIMIT, - None, - callee_code_hash.as_ref().to_vec(), - )); + assert_ok!(builder::call(caller_addr.clone()) + .value(1337) + .data(callee_code_hash.as_ref().to_vec()) + .build()); }); } @@ -1306,20 +1094,9 @@ fn transfer_expendable_cannot_kill_account() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 1_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(1_000) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. get_contract(&addr); @@ -1347,7 +1124,7 @@ fn transfer_expendable_cannot_kill_account() { } #[test] -fn cannot_self_destruct_through_draning() { +fn cannot_self_destruct_through_draining() { let (wasm, _code_hash) = compile_module::("drain").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); @@ -1355,34 +1132,16 @@ fn cannot_self_destruct_through_draning() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. get_contract(&addr); // Call BOB which makes it send all funds to the zero address // The contract code asserts that the transfer fails with the correct error code - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Make sure the account wasn't remove by sending all free balance away. assert_eq!( @@ -1400,20 +1159,7 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. 
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); @@ -1423,27 +1169,13 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { assert_eq!(::Currency::total_balance(&addr), info_deposit + min_balance); // Create 100 bytes of storage with a price of per byte and a single storage item of price 2 - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - 100u32.to_le_bytes().to_vec() - )); + assert_ok!(builder::call(addr.clone()).data(100u32.to_le_bytes().to_vec()).build()); assert_eq!(get_contract(&addr).total_deposit(), info_deposit + 102); // Increase the byte price and trigger a refund. This should not have any influence because // the removal is pro rata and exactly those 100 bytes should have been removed. DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500); - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - 0u32.to_le_bytes().to_vec() - )); + assert_ok!(builder::call(addr.clone()).data(0u32.to_le_bytes().to_vec()).build()); // Make sure the account wasn't removed by the refund assert_eq!( @@ -1461,20 +1193,9 @@ fn cannot_self_destruct_while_live() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. get_contract(&addr); @@ -1482,14 +1203,7 @@ fn cannot_self_destruct_while_live() { // Call BOB with input data, forcing it make a recursive call to itself to // self-destruct, resulting in a trap. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![0], - ), + builder::call(addr.clone()).data(vec![0]).build(), Error::::ContractTrapped, ); @@ -1507,20 +1221,9 @@ fn self_destruct_works() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); // Check that the BOB contract has been instantiated. let _ = get_contract(&addr); @@ -1531,10 +1234,7 @@ fn self_destruct_works() { initialize_block(2); // Call BOB without input data which triggers termination. - assert_matches!( - Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), - Ok(_) - ); + assert_matches!(builder::call(addr.clone()).build(), Ok(_)); // Check that code is still there but refcount dropped to zero. assert_refcount!(&code_hash, 0); @@ -1621,20 +1321,10 @@ fn destroy_contract_and_transfer_funds() { // This deploys the BOB contract, which in turn deploys the CHARLIE contract during // construction. 
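`cannot_self_destruct_through_storage_refund_after_price_change` can re-price storage mid-test because the mock exposes the per-byte deposit as a mutable thread-local behind a `Get` adapter. The mock's definition is not in this diff, so the following is only a sketch of the usual pattern, with names assumed:

```rust
use std::cell::RefCell;
use frame_support::traits::Get;

thread_local! {
	// Mutable knob that tests can poke, as done above.
	static DEPOSIT_PER_BYTE: RefCell<u64> = RefCell::new(1);
}

// Adapter the mock runtime's `Config` reads the current price through.
pub struct DepositPerByte;
impl Get<u64> for DepositPerByte {
	fn get() -> u64 {
		DEPOSIT_PER_BYTE.with(|v| *v.borrow())
	}
}

// A test can then change the price mid-run, exactly as above:
// DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500);
```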
- let addr_bob = Contracts::bare_instantiate( - ALICE, - 200_000, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - callee_code_hash.as_ref().to_vec(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_bob = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(200_000) + .data(callee_code_hash.as_ref().to_vec()) + .build_and_unwrap_account_id(); // Check that the CHARLIE contract has been instantiated. let addr_charlie = @@ -1642,14 +1332,7 @@ fn destroy_contract_and_transfer_funds() { get_contract(&addr_charlie); // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_bob, - 0, - GAS_LIMIT, - None, - addr_charlie.encode(), - )); + assert_ok!(builder::call(addr_bob).data(addr_charlie.encode()).build()); // Check that CHARLIE has moved on to the great beyond (ie. died). assert!(get_contract_checked(&addr_charlie).is_none()); @@ -1662,17 +1345,9 @@ fn cannot_self_destruct_in_constructor() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Fail to instantiate the BOB because the contructor calls seal_terminate. + // Fail to instantiate the BOB because the constructor calls seal_terminate. assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 100_000, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - ), + builder::instantiate_with_code(wasm).value(100_000).build(), Error::::TerminatedInConstructor, ); }); @@ -1686,20 +1361,9 @@ fn crypto_hashes() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the CRYPTO_HASHES contract. - let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); // Perform the call. let input = b"_DEAD_BEEF"; use sp_io::hashing::*; @@ -1748,36 +1412,13 @@ fn transfer_return_code() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // Contract has only the minimal balance so any transfer will fail. 
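Worth spelling out, since both appear above: the two instantiate flavours differ only in how the code is supplied, and the bare variants expose the same split through the `Code` enum. All four forms below are taken from hunks in this section (each line comes from a different test, so `wasm` is moved in each):

```rust
// Dispatchables: upload-and-instantiate in one call vs. by existing hash.
assert_ok!(builder::instantiate_with_code(wasm).value(100_000).build());
assert_ok!(builder::instantiate(code_hash).build());

// Bare entry points: the same choice, expressed via `Code`.
let a = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id();
let b = builder::bare_instantiate(Code::Existing(code_hash)).build_and_unwrap_account_id();
```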
::Currency::set_balance(&addr, min_balance); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()).build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); }); } @@ -1791,113 +1432,60 @@ fn call_return_code() { let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - let addr_bob = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![0], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_bob = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .data(vec![0]) + .build_and_unwrap_account_id(); ::Currency::set_balance(&addr_bob, min_balance); // Contract calls into Django which is no valid contract - let result = Contracts::bare_call( - ALICE, - addr_bob.clone(), - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_bob.clone()) + .data(AsRef::<[u8]>::as_ref(&DJANGO).to_vec()) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::NotCallable); - let addr_django = Contracts::bare_instantiate( - CHARLIE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(callee_code), - vec![0], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_django = builder::bare_instantiate(Code::Upload(callee_code)) + .origin(CHARLIE) + .value(min_balance * 100) + .data(vec![0]) + .build_and_unwrap_account_id(); ::Currency::set_balance(&addr_django, min_balance); // Contract has only the minimal balance so any transfer will fail. - let result = Contracts::bare_call( - ALICE, - addr_bob.clone(), - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&addr_django) - .iter() - .chain(&0u32.to_le_bytes()) - .cloned() - .collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_bob.clone()) + .data( + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&0u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but callee reverts because "1" is passed. ::Currency::set_balance(&addr_bob, min_balance + 1000); - let result = Contracts::bare_call( - ALICE, - addr_bob.clone(), - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&addr_django) - .iter() - .chain(&1u32.to_le_bytes()) - .cloned() - .collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_bob.clone()) + .data( + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&1u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. 
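`call_return_code` drives its fixture by concatenating the callee's raw account bytes with a little-endian `u32` selector. A hypothetical helper (not part of the diff) that makes the layout explicit:

```rust
/// Input layout the `call_return_code` fixture expects, as used above:
/// the destination account's bytes followed by a LE u32 behaviour flag
/// (0 = succeed, 1 = revert, 2 = trap).
fn call_input(dest: &AccountId32, flag: u32) -> Vec<u8> {
	AsRef::<[u8]>::as_ref(dest)
		.iter()
		.chain(&flag.to_le_bytes())
		.cloned()
		.collect()
}
```

With it, the "callee reverts" case above would read `builder::bare_call(addr_bob.clone()).data(call_input(&addr_django, 1)).build_and_unwrap_result()`.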
- let result = Contracts::bare_call( - ALICE, - addr_bob, - 0, - GAS_LIMIT, - None, - AsRef::<[u8]>::as_ref(&addr_django) - .iter() - .chain(&2u32.to_le_bytes()) - .cloned() - .collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_bob) + .data( + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&2u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); }); } @@ -1912,95 +1500,34 @@ fn instantiate_return_code() { let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); let callee_hash = callee_hash.as_ref().to_vec(); - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - min_balance * 100, - GAS_LIMIT, - None, - callee_code, - vec![], - vec![], - )); + assert_ok!(builder::instantiate_with_code(callee_code).value(min_balance * 100).build()); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // Contract has only the minimal balance so any transfer will fail. ::Currency::set_balance(&addr, min_balance); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - callee_hash.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()) + .data(callee_hash.clone()) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but the passed code hash is invalid ::Currency::set_balance(&addr, min_balance + 10_000); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![0; 33], - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()).data(vec![0; 33]).build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CodeNotFound); // Contract has enough balance but callee reverts because "1" is passed. - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()) + .data(callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect()) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. 
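The `build_and_unwrap_*` tails replace the `.result.unwrap()` chains of the old code. Presumably they are thin wrappers along these lines (a sketch against the builder structs assumed earlier, not the real definitions):

```rust
impl BareCallBuilder {
	/// Dispatch and panic on failure, returning the contract's output.
	pub fn build_and_unwrap_result(self) -> ExecReturnValue {
		self.build().result.unwrap()
	}
}

impl BareInstantiateBuilder {
	/// Dispatch and panic on failure, returning the new contract's account id.
	pub fn build_and_unwrap_account_id(self) -> AccountId32 {
		self.build().result.unwrap().account_id
	}
}
```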
- let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr) + .data(callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect()) + .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); }); } @@ -2013,15 +1540,7 @@ fn disabled_chain_extension_wont_deploy() { let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); TestExtension::disable(); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 3 * min_balance, - GAS_LIMIT, - None, - code, - vec![], - vec![], - ), + builder::instantiate_with_code(code).value(3 * min_balance).build(), >::CodeRejected, ); }); @@ -2033,23 +1552,12 @@ fn disabled_chain_extension_errors_on_call() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); TestExtension::disable(); assert_err_ignore_postinfo!( - Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), + builder::call(addr.clone()).build(), Error::::CodeRejected, ); }); @@ -2061,141 +1569,61 @@ fn chain_extension_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // 0 = read input buffer and pass it through as output let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(input.clone()).build(); assert_eq!(TestExtension::last_seen_buffer(), input); assert_eq!(result.result.unwrap().data, input); // 1 = treat inputs as integer primitives and store the supplied integers - Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into()) + .build_and_unwrap_result(); assert_eq!(TestExtension::last_seen_input_len(), 4); // 2 = charge some extra weight (amount supplied in the fifth byte) - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - 
); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into()) + .build(); assert_ok!(result.result); let gas_consumed = result.gas_consumed; - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into()) + .build(); assert_ok!(result.result); assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into()) + .build(); assert_ok!(result.result); assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into()) + .build_and_unwrap_result(); assert_eq!(result.flags, ReturnFlags::REVERT); assert_eq!(result.data, vec![42, 99]); // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer // We set the MSB part to 1 (instead of 0) which routes the request into the second // extension - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()) + .data(ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into()) + .build_and_unwrap_result(); assert_eq!(result.flags, ReturnFlags::REVERT); assert_eq!(result.data, vec![0x4B, 0x1D]); // Diverging to third chain extension that is disabled // We set the MSB part to 2 (instead of 0) which routes the request into the third extension assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into(), - ), + builder::call(addr.clone()) + .data(ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into()) + .build(), Error::::NoChainExtension, ); }); @@ -2207,20 +1635,9 @@ fn chain_extension_temp_storage_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // Call func 0 and func 1 
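The `extension_id`/`func_id` pair mirrors how pallet_contracts routes chain-extension calls: the contract passes a single `u32` whose upper half selects a registered extension and whose lower half names the function, which is what the "MSB part" comments above refer to. Assuming the test's `ExtensionInput: Into<Vec<u8>>` impl encodes exactly that composition:

```rust
/// Compose the u32 id the chain-extension host function receives.
/// Upper 16 bits: which registered extension; lower 16 bits: which function.
fn extension_call_id(extension_id: u16, func_id: u16) -> u32 {
	((extension_id as u32) << 16) | func_id as u32
}

assert_eq!(extension_call_id(1, 0), 0x0001_0000); // routes to the second extension
assert_eq!(extension_call_id(0, 3), 3);           // func 3 on the first extension
```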
back to back. let stop_recursion = 0u8; @@ -2231,20 +1648,7 @@ fn chain_extension_temp_storage_works() { .as_ref(), ); - assert_ok!( - Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - ); + assert_ok!(builder::bare_call(addr.clone()).data(input.clone()).build().result); }) } @@ -2255,20 +1659,9 @@ fn lazy_removal_works() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let trie = &info.child_trie_info(); @@ -2277,14 +1670,7 @@ fn lazy_removal_works() { child::put(trie, &[99], &42); // Terminate the contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2309,20 +1695,10 @@ fn lazy_batch_removal_works() { let mut tries: Vec = vec![]; for i in 0..3u8 { - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code.clone()), - vec![], - vec![i], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code.clone())) + .value(min_balance * 100) + .salt(vec![i]) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let trie = &info.child_trie_info(); @@ -2332,14 +1708,7 @@ fn lazy_batch_removal_works() { // Terminate the contract. Contract info should be gone, but value should be still there // as the lazy removal did not run, yet. 
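A reminder of what these `lazy_removal_*` tests exercise: terminating a contract deletes its `ContractInfo` at once but only queues the child trie for deletion; the keys survive until the sweep runs. The sweep invocation sits outside the shown hunks, so the entry point below is assumed from the test structure:

```rust
// Terminate: ContractInfo is gone immediately, the child-trie value is not.
assert_ok!(builder::call(addr.clone()).build());
assert!(!<ContractInfoOf<Test>>::contains_key(&addr));
assert_matches!(child::get(trie, &[99]), Some(42));

// Only the lazy sweep deletes the keys (assumed entry point; needs
// `use frame_support::traits::Hooks;` and an ample weight budget).
Contracts::on_idle(System::block_number(), Weight::MAX);
assert_matches!(child::get::<i32>(trie, &[99]), None);
```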
- assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); assert!(!>::contains_key(&addr)); assert_matches!(child::get(trie, &[99]), Some(42)); @@ -2375,20 +1744,9 @@ fn lazy_removal_partial_remove_works() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let info = get_contract(&addr); @@ -2399,14 +1757,7 @@ fn lazy_removal_partial_remove_works() { >::insert(&addr, info.clone()); // Terminate the contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2457,20 +1808,9 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let trie = &info.child_trie_info(); @@ -2479,14 +1819,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { child::put(trie, &[99], &42); // Terminate the contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2529,20 +1862,9 @@ fn lazy_removal_does_not_use_all_weight() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let (weight_per_key, max_keys) = ContractInfo::::deletion_budget(weight_limit); @@ -2559,14 +1881,7 @@ fn lazy_removal_does_not_use_all_weight() { >::insert(&addr, info.clone()); // Terminate the contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2620,20 +1935,10 @@ fn deletion_queue_ring_buffer_overflow() { // add 3 contracts to the deletion queue for i in 0..3u8 { - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(code.clone()), - vec![], - vec![i], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code.clone())) + .value(min_balance * 100) + 
.salt(vec![i]) + .build_and_unwrap_account_id(); let info = get_contract(&addr); let trie = &info.child_trie_info(); @@ -2643,14 +1948,7 @@ fn deletion_queue_ring_buffer_overflow() { // Terminate the contract. Contract info should be gone, but value should be still // there as the lazy removal did not run, yet. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).build()); assert!(!>::contains_key(&addr)); assert_matches!(child::get(trie, &[99]), Some(42)); @@ -2678,87 +1976,36 @@ fn refcounter() { let min_balance = Contracts::min_balance(); // Create two contracts with the same code and check that they do in fact share it. - let addr0 = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm.clone()), - vec![], - vec![0], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let addr1 = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm.clone()), - vec![], - vec![1], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr0 = builder::bare_instantiate(Code::Upload(wasm.clone())) + .value(min_balance * 100) + .salt(vec![0]) + .build_and_unwrap_account_id(); + let addr1 = builder::bare_instantiate(Code::Upload(wasm.clone())) + .value(min_balance * 100) + .salt(vec![1]) + .build_and_unwrap_account_id(); assert_refcount!(code_hash, 2); // Sharing should also work with the usual instantiate call - let addr2 = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![2], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr2 = builder::bare_instantiate(Code::Existing(code_hash)) + .value(min_balance * 100) + .salt(vec![2]) + .build_and_unwrap_account_id(); assert_refcount!(code_hash, 3); - - // Terminating one contract should decrement the refcount - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr0, - 0, - GAS_LIMIT, - None, - vec![] - )); + + // Terminating one contract should decrement the refcount + assert_ok!(builder::call(addr0).build()); assert_refcount!(code_hash, 2); // remove another one - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr1, - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr1).build()); assert_refcount!(code_hash, 1); // Pristine code should still be there PristineCode::::get(code_hash).unwrap(); // remove the last contract - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr2, - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr2).build()); assert_refcount!(code_hash, 0); // refcount is `0` but code should still exists because it needs to be removed manually @@ -2772,31 +2019,10 @@ fn debug_message_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ); + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); + 
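The `.salt(vec![i])` loops and `refcounter`'s three salts work because contract addresses are derived deterministically from deployer, code hash, constructor input, and salt, so identical code under different salts yields distinct accounts. Sketched with the same `Contracts::contract_address` used earlier in this file (argument order assumed from that call site):

```rust
// Same deployer, code, and input; only the salt differs.
let addr_0 = Contracts::contract_address(&ALICE, &code_hash, &[], &[0]);
let addr_1 = Contracts::contract_address(&ALICE, &code_hash, &[], &[1]);
assert_ne!(addr_0, addr_1); // distinct, yet fully deterministic
```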
let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build(); assert_matches!(result.result, Ok(_)); assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!"); @@ -2809,32 +2035,11 @@ fn debug_message_logging_disabled() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); // disable logging by passing `false` - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).build(); assert_matches!(result.result, Ok(_)); // the dispatchables always run without debugging assert_ok!(Contracts::call(RuntimeOrigin::signed(ALICE), addr, 0, GAS_LIMIT, None, vec![])); @@ -2848,31 +2053,10 @@ fn debug_message_invalid_utf8() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 30_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ); + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_account_id(); + let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build(); assert_ok!(result.result); assert!(result.debug_message.is_empty()); }); @@ -2887,50 +2071,17 @@ fn gas_estimation_for_subcalls() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 2_000 * min_balance); - let addr_caller = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); - let addr_dummy = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(dummy_code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_dummy = builder::bare_instantiate(Code::Upload(dummy_code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); - let addr_call_runtime = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(call_runtime_code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_call_runtime = builder::bare_instantiate(Code::Upload(call_runtime_code)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); // Run the test for all of those weight limits for the subcall let weights = [ @@ -2966,17 +2117,7 @@ fn gas_estimation_for_subcalls() { .collect(); // Call in order to determine the gas that is required for this call - let result = Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - 
GAS_LIMIT, - None, - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr_caller.clone()).data(input.clone()).build(); assert_ok!(&result.result); // If the out of gas happens in the subcall the caller contract @@ -2992,51 +2133,33 @@ fn gas_estimation_for_subcalls() { // Make the same call using the estimated gas. Should succeed. assert_ok!( - Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - result.gas_required, - Some(result.storage_deposit.charge_or_zero()), - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result + builder::bare_call(addr_caller.clone()) + .gas_limit(result.gas_required) + .storage_deposit_limit(Some(result.storage_deposit.charge_or_zero())) + .data(input.clone()) + .build() + .result ); // Check that it fails with too little ref_time assert_err!( - Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - result.gas_required.sub_ref_time(1), - Some(result.storage_deposit.charge_or_zero()), - input.clone(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result, + builder::bare_call(addr_caller.clone()) + .gas_limit(result.gas_required.sub_ref_time(1)) + .storage_deposit_limit(Some(result.storage_deposit.charge_or_zero())) + .data(input.clone()) + .build() + .result, error, ); // Check that it fails with too little proof_size assert_err!( - Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - result.gas_required.sub_proof_size(1), - Some(result.storage_deposit.charge_or_zero()), - input, - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result, + builder::bare_call(addr_caller.clone()) + .gas_limit(result.gas_required.sub_proof_size(1)) + .storage_deposit_limit(Some(result.storage_deposit.charge_or_zero())) + .data(input) + .build() + .result, error, ); } @@ -3052,20 +2175,10 @@ fn gas_estimation_call_runtime() { let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - let addr_caller = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![], - vec![0], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .salt(vec![0]) + .build_and_unwrap_account_id(); // Call something trivial with a huge gas limit so that we can observe the effects // of pre-charging. This should create a difference between consumed and required. @@ -3073,17 +2186,7 @@ fn gas_estimation_call_runtime() { pre_charge: Weight::from_parts(10_000_000, 1_000), actual_weight: Weight::from_parts(100, 100), }); - let result = Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - call.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr_caller.clone()).data(call.encode()).build(); // contract encodes the result of the dispatch runtime let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); assert_eq!(outcome, 0); @@ -3091,18 +2194,11 @@ fn gas_estimation_call_runtime() { // Make the same call using the required gas. Should succeed. 
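The assertions above are the canonical estimate-then-execute flow: dry run with generous limits, re-dispatch with exactly the reported requirements, then show each weight dimension is binding. Condensed under the same test prelude:

```rust
// 1. Dry run at GAS_LIMIT to learn the real requirements.
let estimate = builder::bare_call(addr_caller.clone()).data(input.clone()).build();
let gas = estimate.gas_required;
let deposit = estimate.storage_deposit.charge_or_zero();

// 2. Exactly the estimate succeeds ...
assert_ok!(builder::bare_call(addr_caller.clone())
	.gas_limit(gas)
	.storage_deposit_limit(Some(deposit))
	.data(input.clone())
	.build()
	.result);

// 3. ... and one unit less ref_time (or proof_size) does not.
assert!(builder::bare_call(addr_caller)
	.gas_limit(gas.sub_ref_time(1))
	.storage_deposit_limit(Some(deposit))
	.data(input)
	.build()
	.result
	.is_err());
```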
assert_ok!( - Contracts::bare_call( - ALICE, - addr_caller, - 0, - result.gas_required, - None, - call.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result + builder::bare_call(addr_caller) + .gas_limit(result.gas_required) + .data(call.encode()) + .build() + .result ); }); } @@ -3116,35 +2212,15 @@ fn call_runtime_reentrancy_guarded() { let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - let addr_caller = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(caller_code), - vec![], - vec![0], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .salt(vec![0]) + .build_and_unwrap_account_id(); - let addr_callee = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(callee_code), - vec![], - vec![1], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_callee = builder::bare_instantiate(Code::Upload(callee_code)) + .value(min_balance * 100) + .salt(vec![1]) + .build_and_unwrap_account_id(); // Call pallet_contracts call() dispatchable let call = RuntimeCall::Contracts(crate::Call::call { @@ -3157,19 +2233,9 @@ fn call_runtime_reentrancy_guarded() { // Call runtime to re-enter back to contracts engine by // calling dummy contract - let result = Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - call.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr_caller.clone()) + .data(call.encode()) + .build_and_unwrap_result(); // Call to runtime should fail because of the re-entrancy guard assert_return_code!(result, RuntimeReturnCode::CallRuntimeFailed); }); @@ -3183,20 +2249,9 @@ fn ecdsa_recover() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the ecdsa_recover contract. 
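The hunk boundary elides the `crate::Call::call { ... }` literal that `call_runtime_reentrancy_guarded` encodes. Its field names necessarily mirror the `call` extrinsic's signature seen throughout this file; the values below are illustrative, not the test's actual ones:

```rust
// Encode a pallet_contracts::call dispatch for the caller fixture to
// replay via `call_runtime`; the guard must reject the re-entry.
let call = RuntimeCall::Contracts(crate::Call::call {
	dest: addr_callee,
	value: 0,
	gas_limit: GAS_LIMIT,
	storage_deposit_limit: None,
	data: vec![],
});
```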
- let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); #[rustfmt::skip] let signature: [u8; 65] = [ @@ -3246,17 +2301,10 @@ fn bare_instantiate_returns_events() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let result = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::UnsafeCollect, - ); + let result = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .collect_events(CollectEvents::UnsafeCollect) + .build(); let events = result.events.unwrap(); assert!(!events.is_empty()); @@ -3271,17 +2319,7 @@ fn bare_instantiate_does_not_return_events() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let result = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ); + let result = builder::bare_instantiate(Code::Upload(wasm)).value(min_balance * 100).build(); let events = result.events; assert!(!System::events().is_empty()); @@ -3296,32 +2334,13 @@ fn bare_call_returns_events() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::Skip, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()) + .collect_events(CollectEvents::UnsafeCollect) + .build(); let events = result.events.unwrap(); assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success); @@ -3337,32 +2356,11 @@ fn bare_call_does_not_return_events() { let min_balance = Contracts::min_balance(); let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let addr = Contracts::bare_instantiate( - ALICE, - min_balance * 100, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_account_id(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).build(); let events = result.events; assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success); @@ -3379,20 +2377,9 @@ fn sr25519_verify() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the sr25519_verify contract. 
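`collect_events` is the builder spelling of the old `CollectEvents` positional flag and, like `debug`, exists only on the bare entry points. The four `bare_*_returns_events` tests pin down both settings; compressed below (the `is_none` expectation for the default is inferred from the tests' names, since the hunks cut off before that assertion):

```rust
// UnsafeCollect hands the emitted events back in the result ...
let collected = builder::bare_instantiate(Code::Upload(wasm.clone()))
	.value(min_balance * 100)
	.collect_events(CollectEvents::UnsafeCollect)
	.build()
	.events
	.unwrap();
assert!(!collected.is_empty());

// ... while the default `Skip` leaves them only in frame_system.
let skipped = builder::bare_instantiate(Code::Upload(wasm))
	.value(min_balance * 100)
	.build()
	.events;
assert!(skipped.is_none());
assert!(!System::events().is_empty());
```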
- let addr = Contracts::bare_instantiate( - ALICE, - 100_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_account_id(); let call_with = |message: &[u8; 11]| { // Alice's signature for "hello world" @@ -3450,34 +2437,10 @@ fn failed_deposit_charge_should_roll_back_call() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate both contracts. - let addr_caller = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let addr_callee = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(wasm_caller.clone())) + .build_and_unwrap_account_id(); + let addr_callee = builder::bare_instantiate(Code::Upload(wasm_callee.clone())) + .build_and_unwrap_account_id(); // Give caller proxy access to Alice. assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(ALICE), addr_caller.clone(), (), 0)); @@ -3701,15 +2664,7 @@ fn remove_code_in_use() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - )); + assert_ok!(builder::instantiate_with_code(wasm).build()); // Drop previous events initialize_block(2); @@ -3753,20 +2708,7 @@ fn instantiate_with_zero_balance_works() { initialize_block(2); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); // Ensure the contract was stored and get expected deposit amount to be reserved. let deposit_expected = expected_deposit(ensure_stored(code_hash)); @@ -3850,20 +2792,9 @@ fn instantiate_with_below_existential_deposit_works() { initialize_block(2); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_account_id(); // Ensure the contract was stored and get expected deposit amount to be reserved. 
let deposit_expected = expected_deposit(ensure_stored(code_hash)); @@ -3949,20 +2880,7 @@ fn storage_deposit_works() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let mut deposit = test_utils::contract_info_storage_deposit(&addr); @@ -3970,41 +2888,23 @@ fn storage_deposit_works() { initialize_block(2); // Create storage - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 42, - GAS_LIMIT, - None, - (1_000u32, 5_000u32).encode(), - )); + assert_ok!(builder::call(addr.clone()) + .value(42) + .data((1_000u32, 5_000u32).encode()) + .build()); // 4 is for creating 2 storage items let charged0 = 4 + 1_000 + 5_000; deposit += charged0; assert_eq!(get_contract(&addr).total_deposit(), deposit); - - // Add more storage (but also remove some) - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - (2_000u32, 4_900u32).encode(), - )); + + // Add more storage (but also remove some) + assert_ok!(builder::call(addr.clone()).data((2_000u32, 4_900u32).encode()).build()); let charged1 = 1_000 - 100; deposit += charged1; assert_eq!(get_contract(&addr).total_deposit(), deposit); // Remove more storage (but also add some) - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - (2_100u32, 900u32).encode(), - )); + assert_ok!(builder::call(addr.clone()).data((2_100u32, 900u32).encode()).build()); // -1 for numeric instability let refunded0 = 4_000 - 100 - 1; deposit -= refunded0; @@ -4093,43 +2993,12 @@ fn storage_deposit_callee_works() { let min_balance = Contracts::min_balance(); // Create both contracts: Constructors do nothing. 
- let addr_caller = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let addr_callee = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_account_id(); + let addr_callee = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_account_id(); - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller, - 0, - GAS_LIMIT, - None, - (100u32, &addr_callee).encode() - )); + assert_ok!(builder::call(addr_caller).data((100u32, &addr_callee).encode()).build()); let callee = get_contract(&addr_callee); let deposit = DepositPerByte::get() * 100 + DepositPerItem::get() * 1; @@ -4152,20 +3021,7 @@ fn set_code_extrinsic() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -4189,7 +3045,7 @@ fn set_code_extrinsic() { assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![],); + assert_eq!(System::events(), vec![]); // contract must exist assert_noop!( @@ -4199,7 +3055,7 @@ fn set_code_extrinsic() { assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![],); + assert_eq!(System::events(), vec![]); // new code hash must exist assert_noop!( @@ -4209,7 +3065,7 @@ fn set_code_extrinsic() { assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![],); + assert_eq!(System::events(), vec![]); // successful call assert_ok!(Contracts::set_code(RuntimeOrigin::root(), addr.clone(), new_code_hash)); @@ -4239,20 +3095,9 @@ fn slash_cannot_kill_account() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); let min_balance = Contracts::min_balance(); - let addr = Contracts::bare_instantiate( - ALICE, - value, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_account_id(); // Drop previous events initialize_block(2); @@ -4303,29 +3148,13 @@ fn contract_reverted() { // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - input.clone(), - vec![], - ), + builder::instantiate(code_hash).data(input.clone()).build(), >::ContractReverted, ); // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm, - input.clone(), - vec![], - ), + 
builder::instantiate_with_code(wasm).data(input.clone()).build(), >::ContractReverted, ); @@ -4333,66 +3162,26 @@ fn contract_reverted() { // This is just a different way of transporting the error that allows the read out // the `data` which is only there on success. Obviously, the contract isn't // instantiated. - let result = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - input.clone(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap(); + let result = builder::bare_instantiate(Code::Existing(code_hash)) + .data(input.clone()) + .build_and_unwrap_result(); assert_eq!(result.result.flags, flags); assert_eq!(result.result.data, buffer); assert!(!>::contains_key(result.account_id)); // Pass empty flags and therefore successfully instantiate the contract for later use. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - ReturnFlags::empty().bits().encode(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Existing(code_hash)) + .data(ReturnFlags::empty().bits().encode()) + .build_and_unwrap_account_id(); // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - input.clone() - ), + builder::call(addr.clone()).data(input.clone()).build(), >::ContractReverted, ); // Calling directly: revert leads to success but the flags indicate the error - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - input, - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(addr.clone()).data(input).build_and_unwrap_result(); assert_eq!(result.flags, flags); assert_eq!(result.data, buffer); }); @@ -4407,20 +3196,9 @@ fn set_code_hash() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the 'caller' - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // upload new code assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -4432,35 +3210,16 @@ fn set_code_hash() { System::reset_events(); // First call sets new code_hash and returns 1 - let result = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - new_code_hash.as_ref().to_vec(), - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(contract_addr.clone()) + .data(new_code_hash.as_ref().to_vec()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); assert_return_code!(result, 1); // Second calls new contract code that returns 2 - let result = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result = builder::bare_call(contract_addr.clone()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); assert_return_code!(result, 2); // Checking for the last event only @@ -4512,37 +3271,16 @@ fn storage_deposit_limit_is_enforced() { // Setting 
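`contract_reverted` is the reference for revert semantics: through the extrinsics a revert surfaces as the `ContractReverted` error with no payload, while the bare entry points return `Ok` and leave the revert bit and payload inspectable. The latter, distilled from the hunks above:

```rust
// A bare call sees a revert as success-with-flags, so the payload survives.
let result = builder::bare_call(addr.clone()).data(input).build_and_unwrap_result();
assert!(result.flags.contains(ReturnFlags::REVERT));
assert_eq!(result.data, buffer); // the revert output, still readable
```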
insufficient storage_deposit should fail. assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - Some((2 * min_balance + 3 - 1).into()), /* expected deposit is 2 * ed + 3 for - * the call */ - Code::Upload(wasm.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result, + builder::bare_instantiate(Code::Upload(wasm.clone())) + // expected deposit is 2 * ed + 3 for the call + .storage_deposit_limit(Some((2 * min_balance + 3 - 1).into())) + .build() + .result, >::StorageDepositLimitExhausted, ); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); // Check that the BOB contract has been instantiated and has the minimum balance @@ -4553,14 +3291,10 @@ fn storage_deposit_limit_is_enforced() { // setting insufficient deposit limit, as it requires 3 Balance: // 2 for the item added + 1 for the new storage item. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(2)), - 1u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .storage_deposit_limit(Some(codec::Compact(2))) + .data(1u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4571,26 +3305,12 @@ fn storage_deposit_limit_is_enforced() { // Create 1 byte of storage, should cost 3 Balance: // 2 for the item added + 1 for the new storage item. // Should pass as it fallbacks to DefaultDepositLimit. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - 1u32.to_le_bytes().to_vec() - )); + assert_ok!(builder::call(addr.clone()).data(1u32.to_le_bytes().to_vec()).build()); // Use 4 more bytes of the storage for the same item, which requires 4 Balance. // Should fail as DefaultDepositLimit is 3 and hence isn't enough. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - None, - 5u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()).data(5u32.to_le_bytes().to_vec()).build(), >::StorageDepositLimitExhausted, ); }); @@ -4605,45 +3325,17 @@ fn deposit_limit_in_nested_calls() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Create both contracts: Constructors do nothing. 
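The magic numbers in `storage_deposit_limit_is_enforced` all derive from the mock's pricing, which the comments imply is 2 per storage item and 1 per byte. As arithmetic:

```rust
// Deposit charged for a write, under the pricing implied above
// (DepositPerItem = 2, DepositPerByte = 1; assumed from the comments).
fn storage_deposit(new_items: u64, new_bytes: u64) -> u64 {
	2 * new_items + new_bytes
}

assert_eq!(storage_deposit(1, 1), 3); // first write: a new item plus one byte
assert_eq!(storage_deposit(0, 4), 4); // growing that item by four more bytes
```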
- let addr_caller = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - let addr_callee = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_account_id(); + let addr_callee = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_account_id(); // Create 100 bytes of storage with a price of per byte // This is 100 Balance + 2 Balance for the item - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_callee.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(102)), - 100u32.to_le_bytes().to_vec() - )); + assert_ok!(builder::call(addr_callee.clone()) + .storage_deposit_limit(Some(codec::Compact(102))) + .data(100u32.to_le_bytes().to_vec()) + .build()); // We do not remove any storage but add a storage item of 12 bytes in the caller // contract. This would cost 12 + 2 = 14 Balance. @@ -4651,14 +3343,10 @@ fn deposit_limit_in_nested_calls() { // This should fail as the specified parent's limit is less than the cost: 13 < // 14. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(13)), - (100u32, &addr_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(13))) + .data((100u32, &addr_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // Now we specify the parent's limit high enough to cover the caller's storage additions. @@ -4668,14 +3356,10 @@ fn deposit_limit_in_nested_calls() { // This should fail as the specified parent's limit is less than the cost: 14 // < 15. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(14)), - (101u32, &addr_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(14))) + .data((101u32, &addr_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4685,14 +3369,10 @@ fn deposit_limit_in_nested_calls() { // call should have a deposit limit of at least 2 Balance. The sub-call should be rolled // back, which is covered by the next test case. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(16)), - (102u32, &addr_callee, 1u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(16))) + .data((102u32, &addr_callee, 1u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4700,14 +3380,10 @@ fn deposit_limit_in_nested_calls() { // caller. Note that if previous sub-call wouldn't roll back, this call would pass making // the test case fail. We don't set a special limit for the nested call here. 
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(0)), - (87u32, &addr_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(0))) + .data((87u32, &addr_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); @@ -4716,28 +3392,19 @@ fn deposit_limit_in_nested_calls() { // Require more than the sender's balance. // We don't set a special limit for the nested call. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - (1200u32, &addr_callee, 1u64).encode(), - ), + builder::call(addr_caller.clone()) + .data((1200u32, &addr_callee, 1u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // Same as above but allow for the additional deposit of 1 Balance in parent. // We set the special deposit limit of 1 Balance for the nested call, which isn't // enforced as callee frees up storage. This should pass. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(1)), - (87u32, &addr_callee, 1u64).encode(), - )); + assert_ok!(builder::call(addr_caller.clone()) + .storage_deposit_limit(Some(codec::Compact(1))) + .data((87u32, &addr_callee, 1u64).encode()) + .build()); }); } @@ -4751,35 +3418,13 @@ fn deposit_limit_in_nested_instantiate() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); let _ = ::Currency::set_balance(&BOB, 1_000_000); // Create caller contract - let addr_caller = Contracts::bare_instantiate( - ALICE, - 10_000u64, // this balance is later passed to the deployed contract - GAS_LIMIT, - None, - Code::Upload(wasm_caller), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr_caller = builder::bare_instantiate(Code::Upload(wasm_caller)) + .value(10_000u64) // this balance is later passed to the deployed contract + .build_and_unwrap_account_id(); // Deploy a contract to get its occupied storage size - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_callee), - vec![0, 0, 0, 0], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm_callee)) + .data(vec![0, 0, 0, 0]) + .build_and_unwrap_account_id(); let callee_info_len = ContractInfoOf::::get(&addr).unwrap().encoded_size() as u64; @@ -4795,14 +3440,11 @@ fn deposit_limit_in_nested_instantiate() { // Provided the limit is set to be 1 Balance less, // this call should fail on the return from the caller contract. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 1)), - (0u32, &code_hash_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 1))) + .data((0u32, &code_hash_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on instantiation should be rolled back. @@ -4812,14 +3454,11 @@ fn deposit_limit_in_nested_instantiate() { // byte in the constructor. Hence +1 Balance to the limit is needed. This should fail on the // return from constructor. 
assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 2)), - (1u32, &code_hash_callee, 0u64).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 2))) + .data((1u32, &code_hash_callee, 0u64).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. @@ -4829,14 +3468,11 @@ fn deposit_limit_in_nested_instantiate() { // This should fail during the charging for the instantiation in // `RawMeter::charge_instantiate()` assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 2)), - (0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 2))) + .data((0u32, &code_hash_callee, callee_info_len + 2 + ED + 1).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. @@ -4847,31 +3483,22 @@ fn deposit_limit_in_nested_instantiate() { // Now we set enough limit for the parent call, but insufficient limit for child // instantiate. This should fail right after the constructor execution. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 3)), // enough parent limit - (1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode(), - ), + builder::call(addr_caller.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 3))) // enough parent limit + .data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 2).encode()) + .build(), >::StorageDepositLimitExhausted, ); // The charges made on the instantiation should be rolled back. assert_eq!(::Currency::free_balance(&BOB), 1_000_000); // Set enough deposit limit for the child instantiate. This should succeed. - let result = Contracts::bare_call( - BOB, - addr_caller.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(callee_info_len + 2 + ED + 4).into()), - (1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr_caller.clone()) + .origin(BOB) + .storage_deposit_limit(Some(codec::Compact(callee_info_len + 2 + ED + 4).into())) + .data((1u32, &code_hash_callee, callee_info_len + 2 + ED + 3).encode()) + .build(); let returned = result.result.unwrap(); // All balance of the caller except ED has been transferred to the callee. @@ -4891,7 +3518,7 @@ fn deposit_limit_in_nested_instantiate() { 1_000_000 - (callee_info_len + 2 + ED + 3) ); // Check that deposit due to be charged still includes these 3 Balance - assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3),) + assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3)) }); } @@ -4905,20 +3532,7 @@ fn deposit_limit_honors_liquidity_restrictions() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. 
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); // Check that the contract has been instantiated and has the minimum balance @@ -4933,14 +3547,11 @@ fn deposit_limit_honors_liquidity_restrictions() { ) .unwrap(); assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(200)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(200))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), min_balance); @@ -4956,20 +3567,7 @@ fn deposit_limit_honors_existential_deposit() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); @@ -4979,14 +3577,11 @@ fn deposit_limit_honors_existential_deposit() { // check that the deposit can't bring the account below the existential deposit assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 0, - GAS_LIMIT, - Some(codec::Compact(900)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(Some(codec::Compact(900))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), 1_000); @@ -5002,20 +3597,7 @@ fn deposit_limit_honors_min_leftover() { let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. 
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); let info_deposit = test_utils::contract_info_storage_deposit(&addr); @@ -5026,14 +3608,12 @@ fn deposit_limit_honors_min_leftover() { // check that the minimum leftover (value send) is considered assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(BOB), - addr.clone(), - 400, - GAS_LIMIT, - Some(codec::Compact(500)), - 100u32.to_le_bytes().to_vec() - ), + builder::call(addr.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .value(400) + .storage_deposit_limit(Some(codec::Compact(500))) + .data(100u32.to_le_bytes().to_vec()) + .build(), >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), 1_000); @@ -5069,34 +3649,15 @@ fn cannot_instantiate_indeterministic_code() { // Try to instantiate directly from code assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - wasm.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(wasm.clone()).build(), >::CodeRejected, ); assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm.clone()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result, + builder::bare_instantiate(Code::Upload(wasm.clone())).build().result, >::CodeRejected, ); - // Try to upload a non deterministic code as deterministic + // Try to upload a non-deterministic code as deterministic assert_err!( Contracts::upload_code( RuntimeOrigin::signed(ALICE), @@ -5116,48 +3677,17 @@ fn cannot_instantiate_indeterministic_code() { )); assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).build(), >::Indeterministic, ); assert_err!( - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Existing(code_hash), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result, + builder::bare_instantiate(Code::Existing(code_hash)).build().result, >::Indeterministic, ); // Deploy contract which instantiates another contract - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = + builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); // Try to instantiate `code_hash` from another contract in deterministic mode assert_err!( @@ -5176,7 +3706,7 @@ fn cannot_instantiate_indeterministic_code() { >::Indeterministic, ); - // Instantiations are not allowed even in non determinism mode + // Instantiations are not allowed even in non-determinism mode assert_err!( >::bare_call( ALICE, @@ -5202,7 +3732,7 @@ fn cannot_set_code_indeterministic_code() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Put the non deterministic contract on-chain + // Put the non-deterministic contract on-chain assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, @@ -5211,22 +3741,10 @@ fn cannot_set_code_indeterministic_code() { )); // Create the contract that will call `seal_set_code_hash` - let caller_addr = 
Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = + builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); - // We do not allow to set the code hash to a non determinstic wasm + // We do not allow to set the code hash to a non-deterministic wasm assert_err!( >::bare_call( ALICE, @@ -5252,7 +3770,7 @@ fn delegate_call_indeterministic_code() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Put the non deterministic contract on-chain + // Put the non-deterministic contract on-chain assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, @@ -5261,20 +3779,8 @@ fn delegate_call_indeterministic_code() { )); // Create the contract that will call `seal_delegate_call` - let caller_addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(caller_wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let caller_addr = + builder::bare_instantiate(Code::Upload(caller_wasm)).build_and_unwrap_account_id(); // The delegate call will fail in deterministic mode assert_err!( @@ -5293,7 +3799,7 @@ fn delegate_call_indeterministic_code() { >::Indeterministic, ); - // The delegate call will work on non deterministic mode + // The delegate call will work on non-deterministic mode assert_ok!( >::bare_call( ALICE, @@ -5331,17 +3837,9 @@ fn locking_delegate_dependency_works() { // Instantiate the caller contract with the given input. let instantiate = |input: &(u32, H256)| { - Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm_caller.clone()), - input.encode(), - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) + builder::bare_instantiate(Code::Upload(wasm_caller.clone())) + .data(input.encode()) + .build() }; // Call contract with the given input. @@ -5429,7 +3927,7 @@ fn locking_delegate_dependency_works() { contract.storage_base_deposit() - ED ); - // Removing an unexisting dependency should fail. + // Removing a nonexistent dependency should fail. assert_err!( call(&addr_caller, &unlock_delegate_dependency_input).result, Error::::DelegateDependencyNotFound @@ -5509,17 +4007,7 @@ fn native_dependency_deposit_works() { }; // Instantiate the set_code_hash contract. 
- let res = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - code, - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ); + let res = builder::bare_instantiate(code).build(); let addr = res.result.unwrap().account_id; let base_deposit = ED + test_utils::contract_info_storage_deposit(&addr); @@ -5567,37 +4055,17 @@ fn reentrance_count_works_with_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // passing reentrant count to the input let input = 0.encode(); - Contracts::bare_call( - ALICE, - contract_addr, - 0, - GAS_LIMIT, - None, - input, - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + builder::bare_call(contract_addr) + .data(input) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); }); } @@ -5608,37 +4076,17 @@ fn reentrance_count_works_with_delegated_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); // adding a callstack height to the input let input = (code_hash, 1).encode(); - Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - input, - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + builder::bare_call(contract_addr.clone()) + .data(input) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); }); } @@ -5651,63 +4099,23 @@ fn account_reentrance_count_works() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let contract_addr = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_account_id(); - let another_contract_addr = Contracts::bare_instantiate( - ALICE, - 300_000, - GAS_LIMIT, - None, - Code::Upload(wasm_reentrance_count), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let another_contract_addr = builder::bare_instantiate(Code::Upload(wasm_reentrance_count)) + .value(300_000) + .build_and_unwrap_account_id(); - let result1 = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - contract_addr.encode(), - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result1 = builder::bare_call(contract_addr.clone()) + .data(contract_addr.encode()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); - let result2 = Contracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - GAS_LIMIT, - None, - 
another_contract_addr.encode(), - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - Determinism::Enforced, - ) - .result - .unwrap(); + let result2 = builder::bare_call(contract_addr.clone()) + .data(another_contract_addr.encode()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); assert_eq!(result1.data, 1.encode()); assert_eq!(result2.data, 0.encode()); @@ -5754,7 +4162,7 @@ fn signed_cannot_set_code() { fn none_cannot_call_code() { ExtBuilder::default().build().execute_with(|| { assert_noop!( - Contracts::call(RuntimeOrigin::none(), BOB, 0, GAS_LIMIT, None, Vec::new()), + builder::call(BOB).origin(RuntimeOrigin::none()).build(), DispatchError::BadOrigin, ); }); @@ -5767,30 +4175,10 @@ fn root_can_call() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_account_id(); // Call the contract. - assert_ok!(Contracts::call( - RuntimeOrigin::root(), - addr.clone(), - 0, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).origin(RuntimeOrigin::root()).build()); }); } @@ -5800,15 +4188,7 @@ fn root_cannot_instantiate_with_code() { ExtBuilder::default().build().execute_with(|| { assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - wasm, - vec![], - vec![], - ), + builder::instantiate_with_code(wasm).origin(RuntimeOrigin::root()).build(), DispatchError::BadOrigin ); }); @@ -5820,15 +4200,7 @@ fn root_cannot_instantiate() { ExtBuilder::default().build().execute_with(|| { assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).origin(RuntimeOrigin::root()).build(), DispatchError::BadOrigin ); }); @@ -5881,53 +4253,25 @@ fn only_instantiation_origin_can_instantiate() { let _ = Balances::set_balance(&BOB, 1_000_000); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::root(), - 0, - GAS_LIMIT, - None, - code.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(code.clone()) + .origin(RuntimeOrigin::root()) + .build(), DispatchError::BadOrigin ); assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(BOB), - 0, - GAS_LIMIT, - None, - code.clone(), - vec![], - vec![], - ), + builder::instantiate_with_code(code.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .build(), DispatchError::BadOrigin ); // Only Alice can instantiate - assert_ok!(Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - None, - code, - vec![], - vec![], - ),); + assert_ok!(builder::instantiate_with_code(code).build()); // Bob cannot instantiate with either `instantiate_with_code` or `instantiate`. assert_err_ignore_postinfo!( - Contracts::instantiate( - RuntimeOrigin::signed(BOB), - 0, - GAS_LIMIT, - None, - code_hash, - vec![], - vec![], - ), + builder::instantiate(code_hash).origin(RuntimeOrigin::signed(BOB)).build(), DispatchError::BadOrigin ); }); @@ -5940,44 +4284,18 @@ fn balance_api_returns_free_balance() { let _ = ::Currency::set_balance(&ALICE, 1_000_000); // Instantiate the BOB contract without any extra balance. 
- let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm.to_vec()), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = + builder::bare_instantiate(Code::Upload(wasm.to_vec())).build_and_unwrap_account_id(); let value = 0; // Call BOB which makes it call the balance runtime API. // The contract code asserts that the returned balance is 0. - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - value, - GAS_LIMIT, - None, - vec![] - )); + assert_ok!(builder::call(addr.clone()).value(value).build()); let value = 1; // Calling with value will trap the contract. assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - addr.clone(), - value, - GAS_LIMIT, - None, - vec![] - ), + builder::call(addr.clone()).value(value).build(), >::ContractTrapped ); }); @@ -5989,37 +4307,14 @@ fn gas_consumed_is_linear_for_nested_calls() { ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let addr = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(code), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; + let addr = builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_account_id(); let max_call_depth = ::CallStack::size() as u32; let [gas_0, gas_1, gas_2, gas_max] = { [0u32, 1u32, 2u32, max_call_depth] .iter() .map(|i| { - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - None, - i.encode(), - DebugInfo::Skip, - CollectEvents::Skip, - Determinism::Enforced, - ); + let result = builder::bare_call(addr.clone()).data(i.encode()).build(); assert_ok!(result.result); result.gas_consumed }) diff --git a/substrate/frame/contracts/src/tests/builder.rs b/substrate/frame/contracts/src/tests/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..08d12503a290ad30a08c44daf73647e278b9a398 --- /dev/null +++ b/substrate/frame/contracts/src/tests/builder.rs @@ -0,0 +1,219 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{AccountId32, Test, ALICE, GAS_LIMIT}; +use crate::{ + tests::RuntimeOrigin, AccountIdLookupOf, AccountIdOf, BalanceOf, Code, CodeHash, CollectEvents, + ContractExecResult, ContractInstantiateResult, DebugInfo, Determinism, EventRecordOf, + ExecReturnValue, OriginFor, Pallet, Weight, +}; +use codec::Compact; +use frame_support::pallet_prelude::DispatchResultWithPostInfo; +use paste::paste; + +/// Helper macro to generate a builder for contract API calls. +macro_rules! builder { + // Entry point to generate a builder for the given method. 
+ ( + $method:ident($($field:ident: $type:ty,)*) -> $result:ty + ) => { + paste!{ + builder!([< $method:camel Builder >], $method($($field: $type,)* ) -> $result); + } + }; + // Generate the builder struct and its methods. + ( + $name:ident, + $method:ident( + $($field:ident: $type:ty,)* + ) -> $result:ty + ) => { + #[doc = concat!("A builder to construct a ", stringify!($method), " call")] + pub struct $name { + $($field: $type,)* + } + + #[allow(dead_code)] + impl $name + { + $( + #[doc = concat!("Set the ", stringify!($field))] + pub fn $field(mut self, value: $type) -> Self { + self.$field = value; + self + } + )* + + #[doc = concat!("Build the ", stringify!($method), " call")] + pub fn build(self) -> $result { + Pallet::::$method( + $(self.$field,)* + ) + } + } + } +} + +builder!( + instantiate_with_code( + origin: OriginFor, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>>, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo +); + +builder!( + instantiate( + origin: OriginFor, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>>, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo +); + +builder!( + bare_instantiate( + origin: AccountIdOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>, + code: Code>, + data: Vec, + salt: Vec, + debug: DebugInfo, + collect_events: CollectEvents, + ) -> ContractInstantiateResult, BalanceOf, EventRecordOf> +); + +builder!( + call( + origin: OriginFor, + dest: AccountIdLookupOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>>, + data: Vec, + ) -> DispatchResultWithPostInfo +); + +builder!( + bare_call( + origin: AccountIdOf, + dest: AccountIdOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>, + data: Vec, + debug: DebugInfo, + collect_events: CollectEvents, + determinism: Determinism, + ) -> ContractExecResult, EventRecordOf> +); + +/// Create a [`BareInstantiateBuilder`] with default values. +pub fn bare_instantiate(code: Code>) -> BareInstantiateBuilder { + BareInstantiateBuilder { + origin: ALICE, + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code, + data: vec![], + salt: vec![], + debug: DebugInfo::Skip, + collect_events: CollectEvents::Skip, + } +} + +impl BareInstantiateBuilder { + /// Build the instantiate call and unwrap the result. + pub fn build_and_unwrap_result(self) -> crate::InstantiateReturnValue> { + self.build().result.unwrap() + } + + /// Build the instantiate call and unwrap the account id. + pub fn build_and_unwrap_account_id(self) -> AccountIdOf { + self.build().result.unwrap().account_id + } +} + +/// Create a [`BareCallBuilder`] with default values. +pub fn bare_call(dest: AccountId32) -> BareCallBuilder { + BareCallBuilder { + origin: ALICE, + dest, + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + data: vec![], + debug: DebugInfo::Skip, + collect_events: CollectEvents::Skip, + determinism: Determinism::Enforced, + } +} + +impl BareCallBuilder { + /// Build the call and unwrap the result. + pub fn build_and_unwrap_result(self) -> ExecReturnValue { + self.build().result.unwrap() + } +} + +/// Create an [`InstantiateWithCodeBuilder`] with default values. 
+pub fn instantiate_with_code(code: Vec) -> InstantiateWithCodeBuilder { + InstantiateWithCodeBuilder { + origin: RuntimeOrigin::signed(ALICE), + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code, + data: vec![], + salt: vec![], + } +} + +/// Create an [`InstantiateBuilder`] with default values. +pub fn instantiate(code_hash: CodeHash) -> InstantiateBuilder { + InstantiateBuilder { + origin: RuntimeOrigin::signed(ALICE), + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code_hash, + data: vec![], + salt: vec![], + } +} + +/// Create a [`CallBuilder`] with default values. +pub fn call(dest: AccountIdLookupOf) -> CallBuilder { + CallBuilder { + origin: RuntimeOrigin::signed(ALICE), + dest, + value: 0, + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + data: vec![], + } +} diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index af287a6f2dff946fa5d3e688ff8a71d9db4c22a3..e6af62c72891d4cfb42a4b7d65127391e0eff9a0 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -1425,7 +1425,7 @@ mod tests { #[test] fn contract_ecdsa_to_eth_address() { - /// calls `seal_ecdsa_to_eth_address` for the contstant and ensures the result equals the + /// calls `seal_ecdsa_to_eth_address` for the constant and ensures the result equals the /// expected one. const CODE_ECDSA_TO_ETH_ADDRESS: &str = r#" (module diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 7d43d30ba580dee2f69bdbe3b51638e9d98e61cb..28a08ab0224ddf4603c881b50c9359d4a6f54d46 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -25,12 +25,8 @@ use crate::{ }; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::DispatchInfo, - ensure, - pallet_prelude::{DispatchResult, DispatchResultWithPostInfo}, - parameter_types, - traits::Get, - weights::Weight, + dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, + traits::Get, weights::Weight, }; use pallet_contracts_proc_macro::define_env; use pallet_contracts_uapi::{CallFlags, ReturnFlags}; @@ -41,9 +37,6 @@ use sp_runtime::{ }; use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; -use xcm::VersionedXcm; - -type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; @@ -245,7 +238,7 @@ pub enum RuntimeCosts { /// Weight of calling `account_reentrance_count` AccountEntranceCount, /// Weight of calling `instantiation_nonce` - InstantationNonce, + InstantiationNonce, /// Weight of calling `lock_delegate_dependency` LockDelegateDependency, /// Weight of calling `unlock_delegate_dependency` @@ -337,7 +330,7 @@ impl Token for RuntimeCosts { EcdsaToEthAddress => s.ecdsa_to_eth_address, ReentrantCount => s.reentrance_count, AccountEntranceCount => s.account_reentrance_count, - InstantationNonce => s.instantiation_nonce, + InstantiationNonce => s.instantiation_nonce, LockDelegateDependency => s.lock_delegate_dependency, UnlockDelegateDependency => s.unlock_delegate_dependency, } @@ -378,29 +371,6 @@ fn already_charged(_: u32) -> Option { None } -/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`] -/// instruction with a call that is not allowed by the CallFilter. 
-fn ensure_executable(message: &VersionedXcm>) -> DispatchResult { - use frame_support::traits::Contains; - use xcm::prelude::{Transact, Xcm}; - - let mut message: Xcm> = - message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?; - - message.iter_mut().try_for_each(|inst| -> DispatchResult { - let Transact { ref mut call, .. } = inst else { return Ok(()) }; - let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?; - - if !::CallFilter::contains(call) { - return Err(frame_system::Error::::CallFiltered.into()) - } - - Ok(()) - })?; - - Ok(()) -} - /// Can only be used for one call. pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, @@ -2112,16 +2082,13 @@ pub mod env { msg_len: u32, ) -> Result { use frame_support::dispatch::DispatchInfo; - use xcm::VersionedXcm; use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message: VersionedXcm> = - ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; - ensure_executable::(&message)?; + let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; let execute_weight = - <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute_blob(); let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); let dispatch_info = DispatchInfo { weight, ..Default::default() }; @@ -2130,9 +2097,9 @@ pub mod env { RuntimeCosts::CallXcmExecute, |ctx| { let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); - let weight_used = <::Xcm>::execute( + let weight_used = <::Xcm>::execute_blob( origin, - Box::new(message), + message, weight.saturating_sub(execute_weight), )?; @@ -2152,19 +2119,18 @@ pub mod env { msg_len: u32, output_ptr: u32, ) -> Result { - use xcm::{VersionedLocation, VersionedXcm}; + use xcm::VersionedLocation; use xcm_builder::{SendController, SendControllerWeightInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; let dest: VersionedLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?; - let message: VersionedXcm<()> = - ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; - let weight = <::Xcm as SendController<_>>::WeightInfo::send(); + let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send_blob(); ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?; let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); - match <::Xcm>::send(origin, dest.into(), message.into()) { + match <::Xcm>::send_blob(origin, dest.into(), message) { Ok(message_id) => { ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; Ok(ReturnErrorCode::Success) @@ -2298,7 +2264,7 @@ pub mod env { /// Returns a nonce that is unique per contract instantiation. /// See [`pallet_contracts_uapi::HostFn::instantiation_nonce`]. 
fn instantiation_nonce(ctx: _, _memory: _) -> Result { - ctx.charge_gas(RuntimeCosts::InstantationNonce)?; + ctx.charge_gas(RuntimeCosts::InstantiationNonce)?; Ok(ctx.ext.nonce()) } diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml index 12bb6b8fc2c0bddffee1cfbf7a3df0243ff421a6..d9a5ee14f0545a7481fcfd133887c74921659589 100644 --- a/substrate/frame/contracts/uapi/Cargo.toml +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] paste = { version = "1.0", default-features = false } bitflags = "1.0" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"], optional = true } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"], optional = true } scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index 04f58895ab4f6ccebb6452ebeec20038ce7f0d78..459cb59bead94fc5af18fb43955621f0e72d7e75 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -790,7 +790,7 @@ pub trait HostFn { /// /// # Parameters /// - /// - `dest`: The XCM destination, should be decodable as [VersionedMultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedMultiLocation.html), + /// - `dest`: The XCM destination, should be decodable as [MultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedLocation.html), /// traps otherwise. /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), /// traps otherwise. diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index ff5af995026f39527780d964aea16ec13be1ec2b..ffb5122ed7f980a547e2d299f0a687641f0d9161 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 5c582a35362ac3a705c8cfc1c5a41502f50d94fc..74baeace898b09084a20b5e8e431efb2973c149f 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -39,7 +39,7 @@ frame_support::construct_runtime!( } ); -// Test that a fitlered call can be dispatched. +// Test that a filtered call can be dispatched. 
pub struct BaseFilter; impl Contains for BaseFilter { fn contains(call: &RuntimeCall) -> bool { diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index 3e678d3274463f46619c19e71be97350fc1c5955..b4258281b7010855bb1f81aae4bce1cd64fb774e 100644 --- a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs index ddde70bd7ce16ec1b2de7ef5899fec5a5213bb79..fd5453310be51a65dac808ee2f07737ca8915203 100644 --- a/substrate/frame/core-fellowship/src/benchmarking.rs +++ b/substrate/frame/core-fellowship/src/benchmarking.rs @@ -149,6 +149,11 @@ mod benchmarks { #[benchmark] fn promote() -> Result<(), BenchmarkError> { + // Ensure that the `min_promotion_period` won't get in our way. + let mut params = Params::::get(); + params.min_promotion_period = [Zero::zero(); RANK_COUNT]; + Params::::put(&params); + let member = make_member::(1)?; ensure_evidence::(&member)?; diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs index d1b81c3ca134fbe30a14034164e45710cea12317..afb188261fd448c2617189e0db71050d3d45234e 100644 --- a/substrate/frame/core-fellowship/src/lib.rs +++ b/substrate/frame/core-fellowship/src/lib.rs @@ -149,7 +149,8 @@ pub mod pallet { }; use frame_system::{ensure_root, pallet_prelude::*}; - const RANK_COUNT: usize = 9; + /// Number of available ranks. + pub(crate) const RANK_COUNT: usize = 9; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index 3c2dc1de595513d6207f76fdf8e406b336a0d97b..d3bbac158056e7edbbf1e08b255d63450fa1c5c0 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -251,7 +251,7 @@ fn swap_exhaustive_works() { }); assert_eq!(root_add, root_swap); - // Ensure that we dont compare trivial stuff like `()` from a type error above. + // Ensure that we don't compare trivial stuff like `()` from a type error above.
assert_eq!(root_add.len(), 32); }); } diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 9a55cda5340c2fa795fd79b17ee4e2b5f568bed2..edd2d742b5069a73212a291aaf440de40e96fd24 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index 08e2a7599f5542a8b87ce80df8d96b5cf4ee9af5..f3d33a72f3ad1a088d4dc050d1e637acd162db7d 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -484,7 +484,7 @@ pub mod pallet { Blacklisted { proposal_hash: T::Hash }, /// An account has voted in a referendum Voted { voter: T::AccountId, ref_index: ReferendumIndex, vote: AccountVote> }, - /// An account has secconded a proposal + /// An account has seconded a proposal Seconded { seconder: T::AccountId, prop_index: PropIndex }, /// A proposal got canceled. ProposalCanceled { prop_index: PropIndex }, diff --git a/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs b/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs index 188c475f64d0ef3e55e6daf3f86f9423e93a4efa..1cb50a157b12bb5ea68fc870e870cda6536fd524 100644 --- a/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs +++ b/substrate/frame/democracy/src/migrations/unlock_and_unreserve_all_funds.rs @@ -321,40 +321,40 @@ mod test { } #[test] - fn unreserve_works_for_depositer() { - let depositer_0 = 10; - let depositer_1 = 11; + fn unreserve_works_for_depositor() { + let depositor_0 = 10; + let depositor_1 = 11; let deposit = 25; - let depositer_0_initial_reserved = 0; - let depositer_1_initial_reserved = 15; + let depositor_0_initial_reserved = 0; + let depositor_1_initial_reserved = 15; let initial_balance = 100_000; new_test_ext().execute_with(|| { // Set up initial state. - ::Currency::make_free_balance_be(&depositer_0, initial_balance); - ::Currency::make_free_balance_be(&depositer_1, initial_balance); + ::Currency::make_free_balance_be(&depositor_0, initial_balance); + ::Currency::make_free_balance_be(&depositor_1, initial_balance); assert_ok!(::Currency::reserve( - &depositer_0, - depositer_0_initial_reserved + deposit + &depositor_0, + depositor_0_initial_reserved + deposit )); assert_ok!(::Currency::reserve( - &depositer_1, - depositer_1_initial_reserved + deposit + &depositor_1, + depositor_1_initial_reserved + deposit )); let depositors = BoundedVec::<_, ::MaxDeposits>::truncate_from(vec![ - depositer_0, - depositer_1, + depositor_0, + depositor_1, ]); DepositOf::::insert(0, (depositors, deposit)); // Sanity check: ensure initial reserved balance was set correctly. 
assert_eq!( - ::Currency::reserved_balance(&depositer_0), - depositer_0_initial_reserved + deposit + ::Currency::reserved_balance(&depositor_0), + depositor_0_initial_reserved + deposit ); assert_eq!( - ::Currency::reserved_balance(&depositer_1), - depositer_1_initial_reserved + deposit + ::Currency::reserved_balance(&depositor_1), + depositor_1_initial_reserved + deposit ); // Run the migration. @@ -365,12 +365,12 @@ mod test { // Assert the reserved balance was reduced by the expected amount. assert_eq!( - ::Currency::reserved_balance(&depositer_0), - depositer_0_initial_reserved + ::Currency::reserved_balance(&depositor_0), + depositor_0_initial_reserved ); assert_eq!( - ::Currency::reserved_balance(&depositer_1), - depositer_1_initial_reserved + ::Currency::reserved_balance(&depositor_1), + depositor_1_initial_reserved ); }); } diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index dd69f48dbc18a95fa3c0c1aea353258a18e4e091..e2946ba98156ad3480f85ae4e86ba4a2036ff59c 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -62,7 +62,7 @@ frame_support::construct_runtime!( } ); -// Test that a fitlered call can be dispatched. +// Test that a filtered call can be dispatched. pub struct BaseFilter; impl Contains for BaseFilter { fn contains(call: &RuntimeCall) -> bool { diff --git a/substrate/frame/democracy/src/tests/cancellation.rs b/substrate/frame/democracy/src/tests/cancellation.rs index 4384fe6a1641aa2f51723c70caeac7093dcb1951..b4c42f9c79053ad6db1641e7e431dc4be7661e9f 100644 --- a/substrate/frame/democracy/src/tests/cancellation.rs +++ b/substrate/frame/democracy/src/tests/cancellation.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The tests for cancelation functionality. +//! The tests for cancellation functionality. 
use super::*; diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index eadce8c1ff847b2902d2d3ca9eb21d797060da4f..2074b51f50f475f077f814593da74b31b5dba6e0 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } log = { workspace = true } @@ -38,7 +38,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } pallet-election-provider-support-benchmarking = { path = "../election-provider-support/benchmarking", default-features = false, optional = true } rand = { version = "0.8.5", default-features = false, features = ["alloc", "small_rng"], optional = true } -strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } +strum = { version = "0.26.2", default-features = false, features = ["derive"], optional = true } [dev-dependencies] parking_lot = "0.12.1" diff --git a/substrate/frame/election-provider-multi-phase/src/helpers.rs b/substrate/frame/election-provider-multi-phase/src/helpers.rs index 57d580e93016cf9be6e1dede6ebbeb95c1609826..a3f27fc18f07730ebbe9907ddd897a17ccb876e2 100644 --- a/substrate/frame/election-provider-multi-phase/src/helpers.rs +++ b/substrate/frame/election-provider-multi-phase/src/helpers.rs @@ -160,7 +160,7 @@ pub fn target_index_fn_linear( } /// Create a function that can map a voter index ([`SolutionVoterIndexOf`]) to the actual voter -/// account using a linearly indexible snapshot. +/// account using a linearly indexable snapshot. pub fn voter_at_fn( snapshot: &Vec>, ) -> impl Fn(SolutionVoterIndexOf) -> Option + '_ { @@ -172,7 +172,7 @@ pub fn voter_at_fn( } /// Create a function that can map a target index ([`SolutionTargetIndexOf`]) to the actual target -/// account using a linearly indexible snapshot. +/// account using a linearly indexable snapshot. pub fn target_at_fn( snapshot: &Vec, ) -> impl Fn(SolutionTargetIndexOf) -> Option + '_ { diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 6bf4dfe4f1eb993e35a99a34bd21b2bde81d4355..11577cd3526233aca7efcc1da4f55d194785e300 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -586,10 +586,8 @@ pub mod pallet { type EstimateCallFee: EstimateCallFee, BalanceOf>; /// Duration of the unsigned phase. - #[pallet::constant] type UnsignedPhase: Get>; /// Duration of the signed phase. - #[pallet::constant] type SignedPhase: Get>; /// The minimum amount of improvement to the solution score that defines a solution as @@ -1134,7 +1132,7 @@ pub mod pallet { /// A solution was stored with the given compute. /// /// The `origin` indicates the origin of the solution. If `origin` is `Some(AccountId)`, - /// the stored solution was submited in the signed phase by a miner with the `AccountId`. + /// the stored solution was submitted in the signed phase by a miner with the `AccountId`. 
/// Otherwise, the solution was stored either during the unsigned phase or by /// `T::ForceOrigin`. The `bool` is `true` when a previous solution was ejected to make /// room for this one. @@ -1192,7 +1190,7 @@ pub mod pallet { BoundNotMet, /// Submitted solution has too many winners TooManyWinners, - /// Sumission was prepared for a different round. + /// Submission was prepared for a different round. PreDispatchDifferentRound, } diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 94348181334061a3c2de81b90742f53c4390ddc9..8b25815eca13ec72491bdb1cb9135a69c540001f 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -1445,7 +1445,7 @@ mod tests { ) .unwrap(); let solution = RawSolution { solution: raw, score, round: MultiPhase::round() }; - // 12 is not better than 12. We need score of atleast 13 to be accepted. + // 12 is not better than 12. We need score of at least 13 to be accepted. assert_eq!(solution.score.minimal_stake, 12); // submitting this will panic. assert_noop!( @@ -1483,7 +1483,7 @@ mod tests { )); // trial 4: a solution who's minimal stake is 17, i.e. 4 better than the last - // soluton. + // solution. let result = ElectionResult { winners: vec![(10, 12)], assignments: vec![ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index e6384450a6fd632f206b7c9bf2bbc870604143e7..25c280921f8c7c8d3660c52555ae5e67093c1a9b 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] parking_lot = "0.12.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } -scale-info = { version = "2.10.0", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } log = { workspace = true } sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 53bff50f7482fd61fb2c77594fa9c3a8957c0a8a..83083c912094bc4b39287f5105021fe2e44a4bb0 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -208,8 +208,8 @@ fn enters_emergency_phase_after_forcing_before_elect() { /// active validators. Thus, slashing a percentage of the current validators that is lower than /// `OffendingValidatorsThreshold` will never force a new era. However, as the slashes progress, if /// the subsequent elections do not meet the minimum election untrusted score, the election will -/// fail and enter in emenergency mode. -fn continous_slashes_below_offending_threshold() { +/// fail and enter in emergency mode. +fn continuous_slashes_below_offending_threshold() { let staking_builder = StakingExtBuilder::default().validator_count(10); let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); @@ -323,7 +323,7 @@ fn set_validation_intention_after_chilled() { } #[test] -/// Active ledger balance may fall below ED if account chills before unbounding. 
+/// Active ledger balance may fall below ED if account chills before unbonding. /// /// Unbonding call fails if the remaining ledger's stash balance falls below the existential /// deposit. However, if the stash is chilled before unbonding, the ledger's active balance may @@ -350,7 +350,7 @@ fn ledger_consistency_active_balance_below_ed() { // however, chilling works as expected. assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); - // now unbonding the full active balance works, since remainer of the active balance is + // now unbonding the full active balance works, since remainder of the active balance is // not enforced to be below `MinNominatorBond` if the stash has been chilled. assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 1000)); @@ -479,7 +479,7 @@ fn automatic_unbonding_pools() { staking_events(), [ // auto-withdraw happened as expected to release 2's unbonding funds, but the funds - // were not transfered to 2 and stay in the pool's tranferrable balance instead. + // were not transferred to 2 and stay in the pool's transferrable balance instead. pallet_staking::Event::Withdrawn { stash: 7939698191839293293, amount: 10 }, pallet_staking::Event::Unbonded { stash: 7939698191839293293, amount: 10 } ] diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index d7d2006d219318ead7fb727e824aa974d683349f..7efcc4701e27eadf6ff64caa3e7fea8671c68114 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -167,7 +167,7 @@ parameter_types! { pub static SignedPhase: BlockNumber = 10; pub static UnsignedPhase: BlockNumber = 10; // we expect a minimum of 3 blocks in signed phase and unsigned phases before trying - // enetering in emergency phase after the election failed. + // entering in emergency phase after the election failed. pub static MinBlocksBeforeEmergency: BlockNumber = 3; pub static MaxActiveValidators: u32 = 1000; pub static OffchainRepeat: u32 = 5; @@ -661,7 +661,7 @@ pub fn roll_to(n: BlockNumber, delay_solution: bool) { Session::on_initialize(b); Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); - // TODO(gpestana): implement a realistic OCW worker insted of simulating it + // TODO(gpestana): implement a realistic OCW worker instead of simulating it // https://github.com/paritytech/substrate/issues/13589 // if there's no solution queued and the solution should not be delayed, try mining and // queue a solution. 
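For context on the contracts test refactor above: the new `substrate/frame/contracts/src/tests/builder.rs` replaces the long positional argument lists of `Contracts::bare_instantiate`, `Contracts::bare_call` and `Contracts::call` with fluent builders that default to origin `ALICE`, value `0`, `GAS_LIMIT`, no storage deposit limit, `DebugInfo::Skip`, `CollectEvents::Skip` and (for `bare_call`) `Determinism::Enforced`. A minimal sketch of the resulting test shape (the `wasm` blob and the call input are placeholders, not taken from this patch):

let addr = builder::bare_instantiate(Code::Upload(wasm))
    .value(300_000)                  // override one default, keep the rest
    .build_and_unwrap_account_id();  // shorthand for .build().result.unwrap().account_id

let output = builder::bare_call(addr.clone())
    .data(0u32.encode())             // call input
    .debug(DebugInfo::UnsafeDebug)   // opt in to debug output
    .build_and_unwrap_result();      // shorthand for .build().result.unwrap()

Because every setter is generated by the `builder!` macro from the declared field list, each test only spells out the fields it actually exercises.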
diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index b182b831ea0d12c8b791d43ab793337a408dbd26..0d9748ee34e56fa5d43f0e238d2d69eda548eaec 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = "solution-type" } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 1bf1165229a7dd20700bb561d4dd776ca7c250bb..09c6a492dd0d6ba498111fc2c8926212e7b35fc2 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -25,7 +25,7 @@ proc-macro-crate = "3.0.0" [dev-dependencies] parity-scale-codec = "3.6.1" -scale-info = "2.10.0" +scale-info = "2.11.1" sp-arithmetic = { path = "../../../primitives/arithmetic" } # used by generate_solution_type: frame-election-provider-support = { path = ".." } diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index a27f0f7d4dd480fc8d6b10cd16c52c0acb617009..1fb9e2387ed8de1be004faf876315c21eeac9696 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -21,7 +21,7 @@ honggfuzz = "0.5" rand = { version = "0.8", features = ["small_rng", "std"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = ".." } frame-election-provider-support = { path = "../.." } sp-arithmetic = { path = "../../../../primitives/arithmetic" } diff --git a/substrate/frame/election-provider-support/src/bounds.rs b/substrate/frame/election-provider-support/src/bounds.rs index b9ae21e49ca70b3700ad43603fbac38f217f75de..6b2423b7fece6bfd1273bab51578bb32032dff31 100644 --- a/substrate/frame/election-provider-support/src/bounds.rs +++ b/substrate/frame/election-provider-support/src/bounds.rs @@ -62,7 +62,7 @@ use sp_runtime::traits::Zero; /// Encapsulates the counting of things that can be bounded in an election, such as voters, /// targets or anything else. /// -/// This struct is defined mostly to prevent callers from mistankingly using `CountBound` instead of +/// This struct is defined mostly to prevent callers from mistakenly using `CountBound` instead of /// `SizeBound` and vice-versa. #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct CountBound(pub u32); @@ -96,7 +96,7 @@ impl Zero for CountBound { /// logic and implementation, but it most likely will represent bytes in SCALE encoding in this /// context. 
/// -/// This struct is defined mostly to prevent callers from mistankingly using `CountBound` instead of +/// This struct is defined mostly to prevent callers from mistakenly using `CountBound` instead of /// `SizeBound` and vice-versa. #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct SizeBound(pub u32); diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index 4dc4a3454aa031da708f536aadcbe9764b093726..81dc48476a065b60e41ce6f703317b77ae75b0fe 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index e4ab5112201d1661d1bbc991489bb6d847d6c32d..43b37c6beba2c6c2545c4b6bc5191e40009065fa 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index e40845a425a29d8d2868e823c1ba34dbabd136c0..2aa062ee6c1a0765f9e20d955d1f312c06157966 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index a9c4e3f3b1fccb9085f1bc9a6a2f018a055d683a..71b97796ecddf89275c3594362d9ece122b7387f 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = 
{ path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } pallet-balances = { path = "../../balances", default-features = false } diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 93a46ba7b249cc806d2366e2bd1ee947b162157d..76bfd65282a940cff1946909882bb4a59270958a 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame = { path = "../..", default-features = false, features = ["experimental", "runtime"] } diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml index 37384107530ee4dca645f12f1b50e4af5547dabc..d8311897c6e1158f8a94520bfd1c6f2712aecbbc 100644 --- a/substrate/frame/examples/kitchensink/Cargo.toml +++ b/substrate/frame/examples/kitchensink/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false, features = ["experimental"] } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index fc5151ff292b4571d9cb898d6246668d5f3b4dc8..468af0345cae71d97afea4c9ce02f2ab2491e5db 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } lite-json = { version = "0.2.0", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } sp-core = { path = "../../../primitives/core", default-features = false } diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index d21c8b4cfd24971f945b73c558463ee7a1f47f77..0a90e896188ebe111baafd46b265d7e8d8790d9e 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -495,7 +495,7 @@ impl Pallet { // Here we showcase two ways to send an unsigned transaction / unsigned payload (raw) // // By default unsigned transactions are disallowed, so we need to whitelist this case - // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefuly + // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefully // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam // attack vectors. 
See validation logic docs for more details.
 		//
diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml
index 613742a6787c9aa91b2d4dffa55b25afdd675c7c..b1d560a85f3ff59de2ab6560223f7b7657f2b27a 100644
--- a/substrate/frame/examples/single-block-migrations/Cargo.toml
+++ b/substrate/frame/examples/single-block-migrations/Cargo.toml
@@ -13,10 +13,10 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-docify = { version = "0.2.3", default-features = false }
+docify = "0.2.8"
 log = { version = "0.4.21", default-features = false }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
 frame-executive = { path = "../../executive", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs
index 86a9e5d6e95bbeaa0230b81153c3f4149f332436..411537aa8c65f0c1462336cd6705a2a900bc230a 100644
--- a/substrate/frame/examples/single-block-migrations/src/lib.rs
+++ b/substrate/frame/examples/single-block-migrations/src/lib.rs
@@ -20,7 +20,7 @@
 //! An example pallet demonstrating best-practices for writing single-block migrations in the
 //! context of upgrading pallet storage.
 //!
-//! ## Forwarning
+//! ## Forewarning
 //!
 //! Single block migrations **MUST** execute in a single block, therefore when executed on a
 //! parachain are only appropriate when guaranteed to not exceed block weight limits. If a
@@ -66,7 +66,7 @@
 //!
 //! ## Adding a migration module
 //!
-//! Writing a pallets migrations in a seperate module is strongly recommended.
+//! Writing a pallet's migrations in a separate module is strongly recommended.
 //!
 //! Here's how the migration module is defined for this pallet:
 //!
@@ -89,7 +89,7 @@
 //!
 //! See the migration source code for detailed comments.
 //!
-//! To keep the migration logic organised, it is split across additional modules:
+//! Here's a brief overview of modules and types defined in `v1.rs`:
 //!
 //! ### `mod v0`
 //!
@@ -98,28 +98,29 @@
 //!
 //! This allows reading the old v0 value from storage during the migration.
 //!
-//! ### `mod version_unchecked`
+//! ### `InnerMigrateV0ToV1`
 //!
 //! Here we define our raw migration logic,
-//! `version_unchecked::MigrateV0ToV1` which implements the [`OnRuntimeUpgrade`] trait.
+//! `InnerMigrateV0ToV1` which implements the [`UncheckedOnRuntimeUpgrade`] trait.
 //!
-//! Importantly, it is kept in a private module so that it cannot be accidentally used in a runtime.
+//! #### Why [`UncheckedOnRuntimeUpgrade`]?
 //!
-//! Private modules cannot be referenced in docs, so please read the code directly.
+//! Otherwise, we would have two implementations of [`OnRuntimeUpgrade`] which could be confusing,
+//! and may lead to accidentally using the wrong one.
 //!
 //! #### Standalone Struct or Pallet Hook?
 //!
 //! Note that the storage migration logic is attached to a standalone struct implementing
-//! [`OnRuntimeUpgrade`], rather than implementing the
+//! [`UncheckedOnRuntimeUpgrade`], rather than implementing the
 //! [`Hooks::on_runtime_upgrade`](frame_support::traits::Hooks::on_runtime_upgrade) hook directly on
 //! the pallet. The pallet hook is better suited for special types of logic that need to execute on
 //! every runtime upgrade, but not so much for one-off storage migrations.
 //!
-//! ### `pub mod versioned`
+//! ### `MigrateV0ToV1`
 //!
-//! Here, `version_unchecked::MigrateV0ToV1` is wrapped in a
+//! Here, `InnerMigrateV0ToV1` is wrapped in a
 //! [`VersionedMigration`] to define
-//! [`versioned::MigrateV0ToV1`](crate::migrations::v1::versioned::MigrateV0ToV1), which may be used
+//! [`MigrateV0ToV1`](crate::migrations::v1::MigrateV0ToV1), which may be used
 //! in runtimes.
 //!
 //! Using [`VersionedMigration`] ensures that
@@ -128,8 +129,6 @@
 //! - Reads and writes from checking and setting the on-chain storage version are accounted for in
 //!   the final [`Weight`](frame_support::weights::Weight)
 //!
-//! This is the only public module exported from `v1`.
-//!
 //! ### `mod test`
 //!
 //! Here basic unit tests are defined for the migration.
@@ -142,7 +141,8 @@
 //! [`VersionedMigration`]: frame_support::migrations::VersionedMigration
 //! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion
 //! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade
-//! [`MigrateV0ToV1`]: crate::migrations::v1::versioned::MigrationV0ToV1
+//! [`UncheckedOnRuntimeUpgrade`]: frame_support::traits::UncheckedOnRuntimeUpgrade
+//! [`MigrateV0ToV1`]: crate::migrations::v1::MigrateV0ToV1
 
 // We make sure this pallet uses `no_std` for compiling to Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs
index b46640a320207551ab55b1dc793574ce68c4c26c..18ef4e72cc4f5ce938abf03d5eaea3039ec0b030 100644
--- a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs
+++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs
@@ -17,7 +17,7 @@
 use frame_support::{
 	storage_alias,
-	traits::{Get, OnRuntimeUpgrade},
+	traits::{Get, UncheckedOnRuntimeUpgrade},
 };
 
 #[cfg(feature = "try-runtime")]
@@ -34,118 +34,92 @@ mod v0 {
 	pub type Value<T: crate::Config> = StorageValue<crate::Pallet<T>, u32>;
 }
 
-/// Private module containing *version unchecked* migration logic.
+/// Implements [`UncheckedOnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1.
 ///
-/// Should only be used by the [`VersionedMigration`](frame_support::migrations::VersionedMigration)
-/// type in this module to create something to export.
+/// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to
+/// contain the struct [`crate::CurrentAndPreviousValue`].
 ///
-/// The unversioned migration should be kept private so the unversioned migration cannot
-/// accidentally be used in any runtimes.
-///
-/// For more about this pattern of keeping items private, see
-/// - 
-/// - 
-mod version_unchecked {
-	use super::*;
+/// In this migration, update the on-chain storage for the pallet to reflect the new storage
+/// layout.
+pub struct InnerMigrateV0ToV1<T: crate::Config>(sp_std::marker::PhantomData<T>);
+
+impl<T: crate::Config> UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1<T> {
+	/// Return the existing [`crate::Value`] so we can check that it was correctly set in
+	/// `InnerMigrateV0ToV1::post_upgrade`.
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+		use codec::Encode;
+
+		// Access the old value using the `storage_alias` type
+		let old_value = v0::Value::<T>::get();
+		// Return it as an encoded `Vec<u8>`
+		Ok(old_value.encode())
+	}
 
-	/// Implements [`OnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1.
-	///
-	/// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to
-	/// contain the struct [`crate::CurrentAndPreviousValue`].
+	/// Migrate the storage from V0 to V1.
 	///
-	/// In this migration, update the on-chain storage for the pallet to reflect the new storage
-	/// layout.
-	pub struct MigrateV0ToV1<T: crate::Config>(sp_std::marker::PhantomData<T>);
-
-	impl<T: crate::Config> OnRuntimeUpgrade for MigrateV0ToV1<T> {
-		/// Return the existing [`crate::Value`] so we can check that it was correctly set in
-		/// `version_unchecked::MigrateV0ToV1::post_upgrade`.
-		#[cfg(feature = "try-runtime")]
-		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
-			use codec::Encode;
-
-			// Access the old value using the `storage_alias` type
-			let old_value = v0::Value::<T>::get();
-			// Return it as an encoded `Vec<u8>`
-			Ok(old_value.encode())
-		}
-
-		/// Migrate the storage from V0 to V1.
-		///
-		/// - If the value doesn't exist, there is nothing to do.
-		/// - If the value exists, it is read and then written back to storage inside a
-		///   [`crate::CurrentAndPreviousValue`].
-		fn on_runtime_upgrade() -> frame_support::weights::Weight {
-			// Read the old value from storage
-			if let Some(old_value) = v0::Value::<T>::take() {
-				// Write the new value to storage
-				let new = crate::CurrentAndPreviousValue { current: old_value, previous: None };
-				crate::Value::<T>::put(new);
-				// One read for the old value, one write for the new value
-				T::DbWeight::get().reads_writes(1, 1)
-			} else {
-				// One read for trying to access the old value
-				T::DbWeight::get().reads(1)
-			}
+	/// - If the value doesn't exist, there is nothing to do.
+	/// - If the value exists, it is read and then written back to storage inside a
+	///   [`crate::CurrentAndPreviousValue`].
+	fn on_runtime_upgrade() -> frame_support::weights::Weight {
+		// Read the old value from storage
+		if let Some(old_value) = v0::Value::<T>::take() {
+			// Write the new value to storage
+			let new = crate::CurrentAndPreviousValue { current: old_value, previous: None };
+			crate::Value::<T>::put(new);
+			// One read for the old value, one write for the new value
+			T::DbWeight::get().reads_writes(1, 1)
+		} else {
+			// One read for trying to access the old value
+			T::DbWeight::get().reads(1)
 		}
+	}
 
-		/// Verifies the storage was migrated correctly.
-		///
-		/// - If there was no old value, the new value should not be set.
-		/// - If there was an old value, the new value should be a
-		///   [`crate::CurrentAndPreviousValue`].
-		#[cfg(feature = "try-runtime")]
-		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
-			use codec::Decode;
-			use frame_support::ensure;
-
-			let maybe_old_value = Option::<u32>::decode(&mut &state[..]).map_err(|_| {
-				sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage")
-			})?;
-
-			match maybe_old_value {
-				Some(old_value) => {
-					let expected_new_value =
-						crate::CurrentAndPreviousValue { current: old_value, previous: None };
-					let actual_new_value = crate::Value::<T>::get();
-
-					ensure!(actual_new_value.is_some(), "New value not set");
-					ensure!(
-						actual_new_value == Some(expected_new_value),
-						"New value not set correctly"
-					);
-				},
-				None => {
-					ensure!(crate::Value::<T>::get().is_none(), "New value unexpectedly set");
-				},
-			};
-			Ok(())
-		}
+	/// Verifies the storage was migrated correctly.
+	///
+	/// - If there was no old value, the new value should not be set.
+	/// - If there was an old value, the new value should be a [`crate::CurrentAndPreviousValue`].
+	#[cfg(feature = "try-runtime")]
+	fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
+		use codec::Decode;
+		use frame_support::ensure;
+
+		let maybe_old_value = Option::<u32>::decode(&mut &state[..]).map_err(|_| {
+			sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage")
+		})?;
+
+		match maybe_old_value {
+			Some(old_value) => {
+				let expected_new_value =
+					crate::CurrentAndPreviousValue { current: old_value, previous: None };
+				let actual_new_value = crate::Value::<T>::get();
+
+				ensure!(actual_new_value.is_some(), "New value not set");
+				ensure!(
+					actual_new_value == Some(expected_new_value),
+					"New value not set correctly"
+				);
+			},
+			None => {
+				ensure!(crate::Value::<T>::get().is_none(), "New value unexpectedly set");
+			},
+		};
+		Ok(())
 	}
 }
 
-/// Public module containing *version checked* migration logic.
-///
-/// This is the only module that should be exported from this module.
-///
-/// See [`VersionedMigration`](frame_support::migrations::VersionedMigration) docs for more about
-/// how it works.
-pub mod versioned {
-	use super::*;
-
-	/// `version_unchecked::MigrateV0ToV1` wrapped in a
-	/// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that:
-	/// - The migration only runs once when the on-chain storage version is 0
-	/// - The on-chain storage version is updated to `1` after the migration executes
-	/// - Reads/Writes from checking/settings the on-chain storage version are accounted for
-	pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration<
-		0, // The migration will only execute when the on-chain storage version is 0
-		1, // The on-chain storage version will be set to 1 after the migration is complete
-		version_unchecked::MigrateV0ToV1<T>,
-		crate::pallet::Pallet<T>,
-		<T as frame_system::Config>::DbWeight,
-	>;
-}
+/// [`UncheckedOnRuntimeUpgrade`] implementation [`InnerMigrateV0ToV1`] wrapped in a
+/// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that:
+/// - The migration only runs once when the on-chain storage version is 0
+/// - The on-chain storage version is updated to `1` after the migration executes
+/// - Reads/Writes from checking/setting the on-chain storage version are accounted for
+pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration<
+	0, // The migration will only execute when the on-chain storage version is 0
+	1, // The on-chain storage version will be set to 1 after the migration is complete
+	InnerMigrateV0ToV1<T>,
+	crate::pallet::Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
 
 /// Tests for our migration.
 ///
@@ -155,10 +129,10 @@ pub mod versioned {
 /// 3. The storage is in the expected state after the migration
 #[cfg(any(all(feature = "try-runtime", test), doc))]
 mod test {
+	use self::InnerMigrateV0ToV1;
 	use super::*;
 	use crate::mock::{new_test_ext, MockRuntime};
 	use frame_support::assert_ok;
-	use version_unchecked::MigrateV0ToV1;
 
 	#[test]
 	fn handles_no_existing_value() {
@@ -168,16 +142,16 @@ mod test {
 			assert!(v0::Value::<MockRuntime>::get().is_none());
 
 			// Get the pre_upgrade bytes
-			let bytes = match MigrateV0ToV1::<MockRuntime>::pre_upgrade() {
+			let bytes = match InnerMigrateV0ToV1::<MockRuntime>::pre_upgrade() {
 				Ok(bytes) => bytes,
 				Err(e) => panic!("pre_upgrade failed: {:?}", e),
 			};
 
 			// Execute the migration
-			let weight = MigrateV0ToV1::<MockRuntime>::on_runtime_upgrade();
+			let weight = InnerMigrateV0ToV1::<MockRuntime>::on_runtime_upgrade();
 
 			// Verify post_upgrade succeeds
-			assert_ok!(MigrateV0ToV1::<MockRuntime>::post_upgrade(bytes));
+			assert_ok!(InnerMigrateV0ToV1::<MockRuntime>::post_upgrade(bytes));
 
 			// The weight should be just 1 read for trying to access the old value.
 			assert_eq!(weight, <MockRuntime as frame_system::Config>::DbWeight::get().reads(1));
@@ -195,16 +169,16 @@ mod test {
 			v0::Value::<MockRuntime>::put(initial_value);
 
 			// Get the pre_upgrade bytes
-			let bytes = match MigrateV0ToV1::<MockRuntime>::pre_upgrade() {
+			let bytes = match InnerMigrateV0ToV1::<MockRuntime>::pre_upgrade() {
 				Ok(bytes) => bytes,
 				Err(e) => panic!("pre_upgrade failed: {:?}", e),
 			};
 
 			// Execute the migration
-			let weight = MigrateV0ToV1::<MockRuntime>::on_runtime_upgrade();
+			let weight = InnerMigrateV0ToV1::<MockRuntime>::on_runtime_upgrade();
 
 			// Verify post_upgrade succeeds
-			assert_ok!(MigrateV0ToV1::<MockRuntime>::post_upgrade(bytes));
+			assert_ok!(InnerMigrateV0ToV1::<MockRuntime>::post_upgrade(bytes));
 
 			// The weight used should be 1 read for the old value, and 1 write for the new
 			// value.
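The `MigrateV0ToV1<T>` alias exported above is the piece a runtime actually consumes. A minimal sketch of that wiring, assuming a runtime crate with the usual `Runtime`, `Block` and `AllPalletsWithSystem` items (those names are assumptions, not part of this diff):

```rust
// Sketch: plugging the versioned migration into `Executive`, which runs the
// tuple below inside `on_runtime_upgrade`.
type Migrations =
	(pallet_example_single_block_migrations::migrations::v1::MigrateV0ToV1<Runtime>,);

pub type Executive = frame_executive::Executive<
	Runtime,
	Block,
	frame_system::ChainContext<Runtime>,
	Runtime,
	AllPalletsWithSystem,
	Migrations,
>;
```

Because the wrapper is a `VersionedMigration`, it is safe to leave in the tuple across releases: it no-ops once the on-chain storage version is already 1.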
diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index d140fc3eef43b04ca7b9175efbdae2830e2d6282..1ef3521e06063cf15679e248c6b46ae27a14e946 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true -description = "FRAME example splitted pallet" +description = "FRAME example split pallet" readme = "README.md" [lints] @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml index 41521114366a298861c883b4d6f14b0eaf6c4276..3f59d57ea0f202729f11ac549daa828424343671 100644 --- a/substrate/frame/examples/tasks/Cargo.toml +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -5,7 +5,7 @@ authors.workspace = true edition.workspace = true license.workspace = true repository.workspace = true -description = "Pallet to demonstrate the usage of Tasks to recongnize and execute service work" +description = "Pallet to demonstrate the usage of Tasks to recognize and execute service work" [lints] workspace = true @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index 63285e4cb4939c10db3342b5d59eeffc63103d48..95de7c3f3d1f086a39a8f12af6c8a851eff3fa4d 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } frame-try-runtime = { path = "../try-runtime", default-features = false, optional = true } diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index eca8247845e2a5990ee98426866fdf5ac929034d..f05f22f764164ce15704a9200c78e65b2867c908 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", 
default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
@@ -30,7 +30,7 @@ frame-election-provider-support = { path = "../election-provider-support", defau
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 
-docify = "0.2.7"
+docify = "0.2.8"
 
 [dev-dependencies]
 pallet-staking-reward-curve = { path = "../staking/reward-curve" }
diff --git a/substrate/frame/fast-unstake/src/tests.rs b/substrate/frame/fast-unstake/src/tests.rs
index b19fe3b8c463ba54571d471b0672d8b7f7d88426..77128872f285f379237153290e394f3a461226e8 100644
--- a/substrate/frame/fast-unstake/src/tests.rs
+++ b/substrate/frame/fast-unstake/src/tests.rs
@@ -110,7 +110,7 @@ fn cannot_register_if_head() {
 			stashes: bounded_vec![(1, Deposit::get())],
 			checked: bounded_vec![],
 		});
-		// Controller attempts to regsiter
+		// Controller attempts to register
 		assert_noop!(
 			FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1)),
 			Error::<T>::AlreadyHead
diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml
index 7de18080b879edb369811a321501c8fe0630e3b7..5ce010f1c2622fbbefa053bf1bb45383e853af4c 100644
--- a/substrate/frame/glutton/Cargo.toml
+++ b/substrate/frame/glutton/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 blake2 = { version = "0.10.4", default-features = false }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml
index db540564fbe7bda05b006b5fb7d2b6da2e33d7c5..f4dd92129f38aeb76a1df8afd21494102eb68bd9 100644
--- a/substrate/frame/grandpa/Cargo.toml
+++ b/substrate/frame/grandpa/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs
index 7a87f0c4b07881864aefd8eb5aad55ea41fc713d..c89592b3b3590cc6c99b2a9e546eb7a280e843c5 100644
--- a/substrate/frame/grandpa/src/benchmarking.rs
+++ b/substrate/frame/grandpa/src/benchmarking.rs
@@ -29,7 +29,7 @@ benchmarks! {
 	// NOTE: generated with the test below `test_generate_equivocation_report_blob`.
 	// the output should be deterministic since the keys we use are static.
 	// with the current benchmark setup it is not possible to generate this
-	// programatically from the benchmark setup.
+	// programmatically from the benchmark setup.
 	const EQUIVOCATION_PROOF_BLOB: [u8; 257] = [
 		1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 136, 220, 52, 23, 213, 5, 142, 196, 180,
 		80, 62, 12, 18, 234, 26, 10, 137, 190, 32,
diff --git a/substrate/frame/grandpa/src/migrations/v5.rs b/substrate/frame/grandpa/src/migrations/v5.rs
index 24cfc34104b5f31cca70f93bc53309ee954a2d42..a0865a3f2bf9a8b6be340065b8b4658c97d5050a 100644
--- a/substrate/frame/grandpa/src/migrations/v5.rs
+++ b/substrate/frame/grandpa/src/migrations/v5.rs
@@ -20,7 +20,7 @@
 use codec::Decode;
 use frame_support::{
 	migrations::VersionedMigration,
 	storage,
-	traits::{Get, OnRuntimeUpgrade},
+	traits::{Get, UncheckedOnRuntimeUpgrade},
 	weights::Weight,
 };
 use sp_consensus_grandpa::AuthorityList;
@@ -36,9 +36,9 @@ fn load_authority_list() -> AuthorityList {
 }
 
 /// Actual implementation of [`MigrateV4ToV5`].
-pub struct MigrateImpl<T>(PhantomData<T>);
+pub struct UncheckedMigrateImpl<T>(PhantomData<T>);
 
-impl<T: Config> OnRuntimeUpgrade for MigrateImpl<T> {
+impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateImpl<T> {
 	#[cfg(feature = "try-runtime")]
 	fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
 		use codec::Encode;
@@ -92,5 +92,10 @@ impl<T: Config> OnRuntimeUpgrade for MigrateImpl<T> {
 /// Migrate the storage from V4 to V5.
 ///
 /// Switches from `GRANDPA_AUTHORITIES_KEY` to a normal FRAME storage item.
-pub type MigrateV4ToV5<T> =
-	VersionedMigration<4, 5, MigrateImpl<T>, Pallet<T>, <T as frame_system::Config>::DbWeight>;
+pub type MigrateV4ToV5<T> = VersionedMigration<
+	4,
+	5,
+	UncheckedMigrateImpl<T>,
+	Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs
index 993d72af6d41062a24362bbbdaf394cb651dd2c7..8b12d63adaadb27c3835f29d8345bd6e6a5b6997 100644
--- a/substrate/frame/grandpa/src/tests.rs
+++ b/substrate/frame/grandpa/src/tests.rs
@@ -634,7 +634,7 @@ fn report_equivocation_invalid_equivocation_proof() {
 			(1, H256::zero(), 10, &equivocation_keyring),
 		));
 
-		// votes targetting different rounds, there is no equivocation.
+		// votes targeting different rounds, there is no equivocation.
 		assert_invalid_equivocation_proof(generate_equivocation_proof(
 			set_id,
 			(1, H256::random(), 10, &equivocation_keyring),
diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml
index 912444bf603606e885d7b0c38c8f8bccbbe93d34..8c0052004ae3d595f3d1f0bef75d92584d3c3ff5 100644
--- a/substrate/frame/identity/Cargo.toml
+++ b/substrate/frame/identity/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 enumflags2 = { version = "0.7.7" }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs
index e0b45eeecd1cf5494f52f83ec835037075155ea7..cdcdb9522615d0439f4d8370551d73b710edc685 100644
--- a/substrate/frame/identity/src/benchmarking.rs
+++ b/substrate/frame/identity/src/benchmarking.rs
@@ -671,10 +671,10 @@ mod benchmarks {
 		let username = bounded_username::<T>(bench_username(), bench_suffix());
 		Identity::<T>::queue_acceptance(&caller, username.clone());
 
-		let expected_exiration =
+		let expected_expiration =
 			frame_system::Pallet::<T>::block_number() + T::PendingUsernameExpiration::get();
 
-		run_to_block::<T>(expected_exiration + One::one());
+		run_to_block::<T>(expected_expiration + One::one());
 
 		#[extrinsic_call]
 		_(RawOrigin::Signed(caller.clone()), username);
diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs
index 78d59180b3f2f8ca684dc52d9e218de8914575b8..4a977880b3150cf9ffa7e2b4032e258483a6d242 100644
--- a/substrate/frame/identity/src/lib.rs
+++ b/substrate/frame/identity/src/lib.rs
@@ -161,7 +161,7 @@ pub mod pallet {
 		/// Structure holding information about an identity.
 		type IdentityInformation: IdentityInformationProvider;
 
-		/// Maxmimum number of registrars allowed in the system. Needed to bound the complexity
+		/// Maximum number of registrars allowed in the system. Needed to bound the complexity
 		/// of, e.g., updating judgements.
 		#[pallet::constant]
 		type MaxRegistrars: Get<u32>;
diff --git a/substrate/frame/identity/src/migration.rs b/substrate/frame/identity/src/migration.rs
index 88ac08d1bf5648e2640a2a248fb8d56113ef3c3a..8725bfd39df1409516c84d12e78b9d0f4bb03fc3 100644
--- a/substrate/frame/identity/src/migration.rs
+++ b/substrate/frame/identity/src/migration.rs
@@ -16,7 +16,9 @@
 //! Storage migrations for the Identity pallet.
 
 use super::*;
-use frame_support::{migrations::VersionedMigration, pallet_prelude::*, traits::OnRuntimeUpgrade};
+use frame_support::{
+	migrations::VersionedMigration, pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade,
+};
 
 #[cfg(feature = "try-runtime")]
 use codec::{Decode, Encode};
@@ -66,7 +68,7 @@ pub mod v1 {
 	/// prevent stalling a parachain by accumulating too much weight in the migration. To have an
 	/// unlimited migration (e.g. in a chain without PoV limits), set this to `u64::MAX`.
 	pub struct VersionUncheckedMigrateV0ToV1<T, const KL: u64>(PhantomData<T>);
-	impl<T: Config, const KL: u64> OnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1<T, KL> {
+	impl<T: Config, const KL: u64> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1<T, KL> {
 		#[cfg(feature = "try-runtime")]
 		fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
 			let identities = v0::IdentityOf::<T>::iter().count();
diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml
index 038cbbcd678cca4d947eb4750e11314f643f2ad9..46b416f0f9a8d72a12aa12f9d8eeb99aaf4c1c2a 100644
--- a/substrate/frame/im-online/Cargo.toml
+++ b/substrate/frame/im-online/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs
index 5e5212e1d562cbf331027a1206c3cc0b91be42de..f9959593494a0b94c4093f4c06f0e45c1e3e4f59 100644
--- a/substrate/frame/im-online/src/tests.rs
+++ b/substrate/frame/im-online/src/tests.rs
@@ -416,7 +416,7 @@ fn should_handle_non_linear_session_progress() {
 
 		Session::rotate_session();
 
-		// if we don't have valid results for the current session progres then
+		// if we don't have valid results for the current session progress then
 		// we'll fallback to `HeartbeatAfter` and only heartbeat on block 5.
MockCurrentSessionProgress::mutate(|p| *p = Some(None)); assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index f810ea36a707fa0c47a8fbb51c523f9ad65fea90..7b14bf358f1ef3e929e8e6be67f33a54b3590928 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index f26bfa95bfd0628f5f5e4a81979d3611102e0847..f4d65d9e560a51afe6211ca2889a95eb864a42f3 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } safe-mix = { version = "1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index 3930ff32fc915dd87c12ad59d0a22b7dce1764b8..5f79704445f964eae544f0c8678f08f86ab85213 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 64214670292739395dc38a024085dec52868d275..6f67db0ae709901f30c2339c40fd6d3399b06606 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } 
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs
index 426fc985a52f3fcd29c1a6802ee16ef4f34d9884..aa6be6497eea697235f71c2927b1adf6044052bd 100644
--- a/substrate/frame/membership/src/lib.rs
+++ b/substrate/frame/membership/src/lib.rs
@@ -369,6 +369,18 @@ impl<T: Config<I>, I: 'static> SortedMembers<T::AccountId> for Pallet<T, I> {
 	fn count() -> usize {
 		Members::<T, I>::decode_len().unwrap_or(0)
 	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn add(new_member: &T::AccountId) {
+		use frame_support::{assert_ok, traits::EnsureOrigin};
+		let new_member_lookup = T::Lookup::unlookup(new_member.clone());
+
+		if let Ok(origin) = T::AddOrigin::try_successful_origin() {
+			assert_ok!(Pallet::<T, I>::add_member(origin, new_member_lookup,));
+		} else {
+			log::error!(target: LOG_TARGET, "Failed to add `{new_member:?}` in `SortedMembers::add`.")
+		}
+	}
 }
 
 #[cfg(feature = "runtime-benchmarks")]
diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml
index d623e25cec2613d590562e5b17d077a10a833357..6dc919e16505a164e84c0e5e5b6c4f299b2da955 100644
--- a/substrate/frame/merkle-mountain-range/Cargo.toml
+++ b/substrate/frame/merkle-mountain-range/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
@@ -29,7 +29,7 @@ sp-std = { path = "../../primitives/std", default-features = false }
 
 [dev-dependencies]
 array-bytes = "6.1"
-env_logger = "0.9"
+env_logger = "0.11"
 itertools = "0.10.3"
 
 [features]
diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs
index e3eb0dc8cf5ec87ee6867978c45b13533d7419f3..7b6edb37b7f7bc8608a417aa9eff6f0713edf9df 100644
--- a/substrate/frame/merkle-mountain-range/src/lib.rs
+++ b/substrate/frame/merkle-mountain-range/src/lib.rs
@@ -89,7 +89,7 @@ mod tests;
 /// is not available (since the block is not finished yet),
 /// we use the `parent_hash` here along with parent block number.
 pub struct ParentNumberAndHash<T: frame_system::Config> {
-	_phanthom: sp_std::marker::PhantomData<T>,
+	_phantom: sp_std::marker::PhantomData<T>,
 }
 
 impl<T: frame_system::Config> LeafDataProvider for ParentNumberAndHash<T> {
diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml
index 8d9da7df39ea58ad1f91d45e5db1bc026031ed89..f263c41831beab8f77aa04522e6cec40d75af703 100644
--- a/substrate/frame/message-queue/Cargo.toml
+++ b/substrate/frame/message-queue/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 log = { workspace = true }
 environmental = { version = "1.1.4", default-features = false }
diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs
index 26a330cc88e8ed2a53c2d19b2d3b90272c7e7778..14b8d2217eb2b3908377014feb90c73588c79eff 100644
--- a/substrate/frame/message-queue/src/integration_test.rs
+++ b/substrate/frame/message-queue/src/integration_test.rs
@@ -73,6 +73,7 @@ impl Config for Test {
 	type HeapSize = HeapSize;
 	type MaxStale = MaxStale;
 	type ServiceWeight = ServiceWeight;
+	type IdleMaxServiceWeight = ();
 }
 
 /// Simulates heavy usage by enqueueing and processing large amounts of messages.
diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs
index 61028057394f6f3bedda173288fa1c9aea3e9e9a..ec85c785f79eb6eea628d9a8df6db0667d2bf70e 100644
--- a/substrate/frame/message-queue/src/lib.rs
+++ b/substrate/frame/message-queue/src/lib.rs
@@ -525,12 +525,21 @@ pub mod pallet {
 		type MaxStale: Get<u32>;
 
 		/// The amount of weight (if any) which should be provided to the message queue for
-		/// servicing enqueued items.
+		/// servicing enqueued items in `on_initialize`.
 		///
 		/// This may be legitimately `None` in the case that you will call
-		/// `ServiceQueues::service_queues` manually.
+		/// `ServiceQueues::service_queues` manually or set [`Self::IdleMaxServiceWeight`] to have
+		/// it run in `on_idle`.
 		#[pallet::constant]
 		type ServiceWeight: Get<Option<Weight>>;
+
+		/// The maximum amount of weight (if any) to be used from remaining weight in `on_idle`,
+		/// which should be provided to the message queue for servicing enqueued items in `on_idle`.
+		/// Useful for parachains to process messages in the same block they are received.
+		///
+		/// If `None`, it will not call `ServiceQueues::service_queues` in `on_idle`.
+		#[pallet::constant]
+		type IdleMaxServiceWeight: Get<Option<Weight>>;
 	}
 
 	#[pallet::event]
@@ -643,6 +652,15 @@ pub mod pallet {
 			}
 		}
 
+		fn on_idle(_n: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
+			if let Some(weight_limit) = T::IdleMaxServiceWeight::get() {
+				// Make use of the remaining weight to process enqueued messages.
+				Self::service_queues(weight_limit.min(remaining_weight))
+			} else {
+				Weight::zero()
+			}
+		}
+
 		#[cfg(feature = "try-runtime")]
 		fn try_state(_: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
 			Self::do_try_state()
@@ -1460,7 +1478,7 @@ impl<T: Config> Pallet<T> {
 
 /// Run a closure that errors on re-entrance. Meant to be used by anything that services queues.
 pub(crate) fn with_service_mutex<F: FnOnce() -> R, R>(f: F) -> Result<R, ()> {
-	// Holds the singelton token instance.
+	// Holds the singleton token instance.
 	environmental::environmental!(token: Option<()>);
 
 	token::using_once(&mut Some(()), || {
diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs
index f22f318b8ef12511e840f21d874b0d3c2fa31519..1281de6b0a66f37e265071b390981138b30dad2e 100644
--- a/substrate/frame/message-queue/src/mock.rs
+++ b/substrate/frame/message-queue/src/mock.rs
@@ -56,6 +56,7 @@ impl Config for Test {
 	type HeapSize = HeapSize;
 	type MaxStale = MaxStale;
 	type ServiceWeight = ServiceWeight;
+	type IdleMaxServiceWeight = ServiceWeight;
 }
 
 /// Mocked `WeightInfo` impl with allows to set the weight per call.
diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs
index dbef94b3b10354a8b503b795bd46bf90010bb80f..d6788847d57173da6a7a06740daf55946f15b15f 100644
--- a/substrate/frame/message-queue/src/tests.rs
+++ b/substrate/frame/message-queue/src/tests.rs
@@ -90,7 +90,7 @@ fn queue_priority_retains() {
 		MessageQueue::enqueue_message(msg("d"), Everywhere(2));
 		assert_ring(&[Everywhere(1), Everywhere(2), Everywhere(3)]);
 		// service head is 1, it will process a, leaving service head at 2. it also processes b but
-		// doees not empty queue 2, so service head will end at 2.
+		// does not empty queue 2, so service head will end at 2.
 		assert_eq!(MessageQueue::service_queues(2.into_weight()), 2.into_weight());
 		assert_eq!(
 			MessagesProcessed::take(),
@@ -1838,3 +1838,45 @@ fn with_service_mutex_works() {
 		with_service_mutex(|| called = 3).unwrap();
 		assert_eq!(called, 3);
 	}
+
+#[test]
+fn process_enqueued_on_idle() {
+	use MessageOrigin::*;
+	build_and_execute::<Test>(|| {
+		// Some messages enqueued on previous block.
+		MessageQueue::enqueue_messages(vec![msg("a"), msg("ab"), msg("abc")].into_iter(), Here);
+		assert_eq!(BookStateFor::<Test>::iter().count(), 1);
+
+		// Process enqueued messages from previous block.
+		Pallet::<Test>::on_initialize(1);
+		assert_eq!(
+			MessagesProcessed::take(),
+			vec![(b"a".to_vec(), Here), (b"ab".to_vec(), Here), (b"abc".to_vec(), Here),]
+		);
+
+		MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There);
+		assert_eq!(BookStateFor::<Test>::iter().count(), 2);
+
+		// Enough weight to process on idle.
+		Pallet::<Test>::on_idle(1, Weight::from_parts(100, 100));
+		assert_eq!(
+			MessagesProcessed::take(),
+			vec![(b"x".to_vec(), There), (b"xy".to_vec(), There), (b"xyz".to_vec(), There)]
+		);
+	})
+}
+
+#[test]
+fn process_enqueued_on_idle_requires_enough_weight() {
+	use MessageOrigin::*;
+	build_and_execute::<Test>(|| {
+		Pallet::<Test>::on_initialize(1);
+
+		MessageQueue::enqueue_messages(vec![msg("x"), msg("xy"), msg("xyz")].into_iter(), There);
+		assert_eq!(BookStateFor::<Test>::iter().count(), 1);
+
+		// Not enough weight to process on idle.
+		Pallet::<Test>::on_idle(1, Weight::from_parts(0, 0));
+		assert_eq!(MessagesProcessed::take(), vec![]);
+	})
+}
diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml
index 40059fb9ec4356b819cb8cbeda1889e1bdd29e25..4726ac5c521e7395a29d374ab195a4a5a2147ff5 100644
--- a/substrate/frame/migrations/Cargo.toml
+++ b/substrate/frame/migrations/Cargo.toml
@@ -12,10 +12,10 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
-docify = "0.1.14"
+docify = "0.2.8"
 impl-trait-for-tuples = "0.2.2"
 log = "0.4.21"
-scale-info = { version = "2.0.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" }
 frame-support = { default-features = false, path = "../support" }
diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs
index cd57d89f440f79d39f98a3da81ed57b6cb5bfcf5..30ed9c08830c8a3dea5599cd634dc76faaabc38a 100644
--- a/substrate/frame/migrations/src/lib.rs
+++ b/substrate/frame/migrations/src/lib.rs
@@ -35,7 +35,7 @@
 //! succeeding after two steps. A runtime upgrade is then enacted and the block number is advanced
 //! until all migrations finish executing. Afterwards, the recorded historic migrations are
 //! checked and events are asserted.
-#![doc = docify::embed!("substrate/frame/migrations/src/tests.rs", simple_works)]
+#![doc = docify::embed!("src/tests.rs", simple_works)]
 //!
 //! ## Pallet API
 //!
diff --git a/substrate/frame/migrations/src/mock_helpers.rs b/substrate/frame/migrations/src/mock_helpers.rs
index c5e23efb4e314e6f5562c50521a533c82f94669e..995ec0a922ccbce7745446bc5af73d9c2f20b044 100644
--- a/substrate/frame/migrations/src/mock_helpers.rs
+++ b/substrate/frame/migrations/src/mock_helpers.rs
@@ -107,7 +107,7 @@ impl SteppedMigrations for MockedMigrations {
 		cursor: Option<Vec<u8>>,
 		meter: &mut WeightMeter,
 	) -> Option<Result<Option<Vec<u8>>, SteppedMigrationError>> {
-		// This is a hack but should be fine. We dont need it in testing.
+		// This is a hack but should be fine. We don't need it in testing.
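The tests above exercise the new `on_idle` servicing path end to end. For a runtime to opt in, only the new associated type needs a value; a configuration sketch follows (assuming a runtime named `Runtime`; the weight figure is an arbitrary placeholder, not taken from this diff):

```rust
use frame_support::{parameter_types, weights::Weight};

parameter_types! {
	// Assumption: cap idle servicing at some fraction of the block's weight budget.
	pub IdleServiceWeight: Option<Weight> = Some(Weight::from_parts(10_000_000_000, 65_536));
}

impl pallet_message_queue::Config for Runtime {
	// ...remaining associated types elided for brevity...
	type IdleMaxServiceWeight = IdleServiceWeight;
}
```

As the second test shows, `on_idle` servicing is best-effort: if the remaining weight is below what a message needs, nothing is processed and the queue simply waits for the next `on_initialize`.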
Self::nth_step(n, cursor, meter) } diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index d1bb01dde1a47e70587dc37ad27123a0632cdcf7..6a4ef5c29ac8b0afe770e51e802f39ca0409eb72 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -21,7 +21,7 @@ frame-benchmarking = { default-features = false, optional = true, path = "../ben frame-support = { default-features = false, path = "../support" } frame-system = { default-features = false, path = "../system" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true } sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" } sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" } diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index 1d2a79bdc52f2fadd6fe2aa782029936c76a28e0..2437acbc2e231b8d9c12fef4e2089e0110193be4 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index 8002b7e1165f1648d5edd39486c996f4eabe2bc6..b5a929468f7dc734ac3efb394b8a99475bb00a41 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index 69e9ea170b14c97ef81a9a5a4fa8ae855cce94d2..4f818ea3e08c52e741124f42463f3b9ecc3ae9c9 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } enumflags2 = { version = "0.7.7" } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git 
a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml
index 025daa07b0c5314011b325570781474356209d02..d0ba74a9273152dcac8e541ee8be8a41d1a5b584 100644
--- a/substrate/frame/nis/Cargo.toml
+++ b/substrate/frame/nis/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/nis/README.md b/substrate/frame/nis/README.md
index 032df7d01868f8643284985985fc98b3f6ccc0ac..8a1a30f17e18a20168e81995e2b64e2f87603968 100644
--- a/substrate/frame/nis/README.md
+++ b/substrate/frame/nis/README.md
@@ -1,5 +1,5 @@
 # NIS Module
 
-Provides a non-interactiove variant of staking.
+Provides a non-interactive variant of staking.
 
 License: Apache-2.0
diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs
index 5e547b63e5474ea36bdcbdb699200f162ca3ac2e..7655cd1a82433215552ed9e7d08a7ec50354a573 100644
--- a/substrate/frame/nis/src/lib.rs
+++ b/substrate/frame/nis/src/lib.rs
@@ -426,7 +426,7 @@ pub mod pallet {
 		},
 		/// An automatic funding of the deficit was made.
 		Funded { deficit: BalanceOf<T> },
-		/// A receipt was transfered.
+		/// A receipt was transferred.
 		Transferred { from: T::AccountId, to: T::AccountId, index: ReceiptIndex },
 	}
 
@@ -457,7 +457,7 @@ pub mod pallet {
 		AlreadyFunded,
 		/// The thaw throttle has been reached for this period.
 		Throttled,
-		/// The operation would result in a receipt worth an insignficant value.
+		/// The operation would result in a receipt worth an insignificant value.
 		MakesDust,
 		/// The receipt is already communal.
 		AlreadyCommunal,
diff --git a/substrate/frame/nis/src/tests.rs b/substrate/frame/nis/src/tests.rs
index 7350da97dc60a65747dc6b195510781ea149b73b..01724999ae7e9fb346f940be776e8b1273ea98f4 100644
--- a/substrate/frame/nis/src/tests.rs
+++ b/substrate/frame/nis/src/tests.rs
@@ -414,7 +414,7 @@ fn thaw_respects_transfers() {
 		assert_eq!(Balances::reserved_balance(&1), 0);
 		assert_eq!(Balances::reserved_balance(&2), 40);
 
-		// Transfering the receipt...
+		// Transferring the receipt...
 		assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::<Test>::NotOwner);
 		// ...and thawing is possible.
diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index a39b0ec4eff8b7fce397866acc7be3a8b812c827..63376163cdcb78baa03a266189d3e52ee13466a7 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index 8a823d29f2355bc6b3cbf4fe8e25906448f2888b..9019a863ad8124fdb8aacac6a8e532d860861730 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -18,7 +18,7 @@ //! # Node authorization pallet //! //! This pallet manages a configurable set of nodes for a permissioned network. -//! Each node is dentified by a PeerId (i.e. `Vec`). It provides two ways to +//! Each node is identified by a PeerId (i.e. `Vec`). It provides two ways to //! authorize a node, //! //! - a set of well known nodes across different organizations in which the @@ -102,7 +102,7 @@ pub mod pallet { #[pallet::getter(fn owners)] pub type Owners = StorageMap<_, Blake2_128Concat, PeerId, T::AccountId>; - /// The additional adapative connections of each node. + /// The additional adaptive connections of each node. #[pallet::storage] #[pallet::getter(fn additional_connection)] pub type AdditionalConnections = @@ -161,7 +161,7 @@ pub mod pallet { NotClaimed, /// You are not the owner of the node. NotOwner, - /// No permisson to perform specific operation. + /// No permission to perform specific operation. PermissionDenied, } @@ -377,7 +377,7 @@ pub mod pallet { /// Add additional connections to a given node. /// /// - `node`: identifier of the node. - /// - `connections`: additonal nodes from which the connections are allowed. + /// - `connections`: additional nodes from which the connections are allowed. #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::add_connections())] pub fn add_connections( @@ -412,7 +412,7 @@ pub mod pallet { /// Remove additional connections of a given node. /// /// - `node`: identifier of the node. - /// - `connections`: additonal nodes from which the connections are not allowed anymore. + /// - `connections`: additional nodes from which the connections are not allowed anymore. 
#[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::remove_connections())] pub fn remove_connections( diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index 9830f31d5fa9accedeb5edbde0c526df4ecefee2..55e9ef6fbd3382f65f2d4be3cfdb5eed1b318345 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 3693ad1866dd4ca0dfeb5d25d9844a3c0b28c111..4985d7acbec92f8258f32338bc348981922c53a9 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # FRAME frame-benchmarking = { path = "../../benchmarking", default-features = false } diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index 48d7dae29ef03f9e6615df19d0092f662ce3cda2..f7df173ec04eab4c681757ec21d01654b9fff9a6 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -795,9 +795,9 @@ frame_benchmarking::benchmarks! { T::Staking::active_stake(&pool_account).unwrap(), min_create_bond + min_join_bond ); - }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::PermissionlessAll) + }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) verify { - assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::PermissionlessAll); + assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::Permissioned); } claim_commission { diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index a0dbdb3aaa47f6108d6b1f0fec92dc4d6826d624..23501cd89d209d41d2a8b4df6a562a5d0acefbf5 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -451,7 +451,7 @@ enum AccountType { /// The permission a pool member can set for other accounts to claim rewards on their behalf. #[derive(Encode, Decode, MaxEncodedLen, Clone, Copy, Debug, PartialEq, Eq, TypeInfo)] pub enum ClaimPermission { - /// Only the pool member themself can claim their rewards. + /// Only the pool member themselves can claim their rewards. Permissioned, /// Anyone can compound rewards on a pool member's behalf. PermissionlessCompound, @@ -461,22 +461,26 @@ pub enum ClaimPermission { PermissionlessAll, } +impl Default for ClaimPermission { + fn default() -> Self { + Self::PermissionlessWithdraw + } +} + impl ClaimPermission { + /// Permissionless compounding of pool rewards is allowed if the current permission is + /// `PermissionlessCompound`, or permissionless. 
fn can_bond_extra(&self) -> bool { matches!(self, ClaimPermission::PermissionlessAll | ClaimPermission::PermissionlessCompound) } + /// Permissionless payout claiming is allowed if the current permission is + /// `PermissionlessWithdraw`, or permissionless. fn can_claim_payout(&self) -> bool { matches!(self, ClaimPermission::PermissionlessAll | ClaimPermission::PermissionlessWithdraw) } } -impl Default for ClaimPermission { - fn default() -> Self { - Self::Permissioned - } -} - /// A member in a pool. #[derive( Encode, @@ -2066,7 +2070,7 @@ pub mod pallet { /// The member will earn rewards pro rata based on the members stake vs the sum of the /// members in the pools stake. Rewards do not "expire". /// - /// See `claim_payout_other` to caim rewards on bahalf of some `other` pool member. + /// See `claim_payout_other` to claim rewards on behalf of some `other` pool member. #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout(origin: OriginFor) -> DispatchResult { @@ -2630,7 +2634,7 @@ pub mod pallet { /// /// In the case of `origin != other`, `origin` can only bond extra pending rewards of /// `other` members assuming set_claim_permission for the given member is - /// `PermissionlessAll` or `PermissionlessCompound`. + /// `PermissionlessCompound` or `PermissionlessAll`. #[pallet::call_index(14)] #[pallet::weight( T::WeightInfo::bond_extra_transfer() @@ -2648,15 +2652,10 @@ pub mod pallet { /// Allows a pool member to set a claim permission to allow or disallow permissionless /// bonding and withdrawing. /// - /// By default, this is `Permissioned`, which implies only the pool member themselves can - /// claim their pending rewards. If a pool member wishes so, they can set this to - /// `PermissionlessAll` to allow any account to claim their rewards and bond extra to the - /// pool. - /// /// # Arguments /// /// * `origin` - Member of a pool. - /// * `actor` - Account to claim reward. // improve this + /// * `permission` - The permission to be applied. #[pallet::call_index(15)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn set_claim_permission( @@ -2666,16 +2665,18 @@ pub mod pallet { let who = ensure_signed(origin)?; ensure!(PoolMembers::::contains_key(&who), Error::::PoolMemberNotFound); + ClaimPermissions::::mutate(who, |source| { *source = permission; }); + Ok(()) } /// `origin` can claim payouts on some pool member `other`'s behalf. /// - /// Pool member `other` must have a `PermissionlessAll` or `PermissionlessWithdraw` in order - /// for this call to be successful. + /// Pool member `other` must have a `PermissionlessWithdraw` or `PermissionlessAll` claim + /// permission for this call to be successful. #[pallet::call_index(16)] #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout_other(origin: OriginFor, other: T::AccountId) -> DispatchResult { @@ -2797,7 +2798,7 @@ pub mod pallet { /// Set or remove a pool's commission claim permission. /// /// Determines who can claim the pool's pending commission. Only the `Root` role of the pool - /// is able to conifigure commission claim permissions. + /// is able to configure commission claim permissions. #[pallet::call_index(22)] #[pallet::weight(T::WeightInfo::set_commission_claim_permission())] pub fn set_commission_claim_permission( @@ -2836,7 +2837,7 @@ pub mod pallet { assert!( T::Staking::bonding_duration() < TotalUnbondingPools::::get(), "There must be more unbonding pools then the bonding duration / - so a slash can be applied to relevant unboding pools. 
(We assume / + so a slash can be applied to relevant unbonding pools. (We assume / the bonding duration > slash deffer duration.", ); } diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index 14410339f59a4dc809187feb6d289b16ec52f79c..796b310862afcb8b897d4dafd66395e33e1970fb 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -17,7 +17,7 @@ use super::*; use crate::log; -use frame_support::traits::OnRuntimeUpgrade; +use frame_support::traits::{OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}; use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; #[cfg(feature = "try-runtime")] @@ -70,7 +70,7 @@ pub mod unversioned { fn on_runtime_upgrade() -> Weight { let migrated = BondedPools::::count(); - // recalcuate the `TotalValueLocked` to compare with the current on-chain TVL which may + // recalculate the `TotalValueLocked` to compare with the current on-chain TVL which may // be out of sync. let tvl: BalanceOf = helpers::calculate_tvl_by_total_stake::(); let onchain_tvl = TotalValueLocked::::get(); @@ -132,7 +132,7 @@ pub mod v8 { } pub struct VersionUncheckedMigrateV7ToV8(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) @@ -211,7 +211,7 @@ pub(crate) mod v7 { CountedStorageMap, Twox64Concat, PoolId, V7BondedPoolInner>; pub struct VersionUncheckedMigrateV6ToV7(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7 { + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7 { fn on_runtime_upgrade() -> Weight { let migrated = BondedPools::::count(); // The TVL should be the sum of all the funds that are actively staked and in the @@ -282,7 +282,7 @@ mod v6 { }) } } - impl OnRuntimeUpgrade for MigrateToV6 { + impl UncheckedOnRuntimeUpgrade for MigrateToV6 { fn on_runtime_upgrade() -> Weight { let mut success = 0u64; let mut fail = 0u64; diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 8fb2b41b88a190df444162a58d0a134936d81272..32b3e9af3cd6da976e9db9062b6689d929a8ef5b 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -2441,16 +2441,10 @@ mod claim_payout { // given assert_eq!(Currency::free_balance(&10), 35); - // Permissioned by default - assert_noop!( - Pools::claim_payout_other(RuntimeOrigin::signed(80), 10), - Error::::DoesNotHavePermission - ); + // when - assert_ok!(Pools::set_claim_permission( - RuntimeOrigin::signed(10), - ClaimPermission::PermissionlessWithdraw - )); + // NOTE: Claim permission of `PermissionlessWithdraw` allows payout claiming as default, + // so a claim permission does not need to be set for non-pool members prior to claiming. 
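For readers skimming the hunk above: the behavioural core of this change is that the default claim permission moves from `Permissioned` to `PermissionlessWithdraw`, so payouts can be claimed on a member's behalf without any prior `set_claim_permission` call, while compounding still requires an explicit opt-in. A minimal standalone Rust sketch of that permission matrix (deliberately avoiding FRAME types; only the enum logic is mirrored):

    // Sketch of the `ClaimPermission` semantics after this diff. Names mirror
    // the pallet enum, but this is an illustration, not the pallet code.
    #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
    enum ClaimPermission {
        Permissioned,
        PermissionlessCompound,
        #[default]
        PermissionlessWithdraw, // the new default introduced by this diff
        PermissionlessAll,
    }

    impl ClaimPermission {
        fn can_bond_extra(self) -> bool {
            matches!(self, Self::PermissionlessAll | Self::PermissionlessCompound)
        }
        fn can_claim_payout(self) -> bool {
            matches!(self, Self::PermissionlessAll | Self::PermissionlessWithdraw)
        }
    }

    fn main() {
        let p = ClaimPermission::default();
        // With the new default, permissionless payout claiming works out of the box...
        assert!(p.can_claim_payout());
        // ...but permissionless compounding still requires an explicit opt-in.
        assert!(!p.can_bond_extra());
    }

This is why the tests below no longer set a permission before `claim_payout_other`, but still do before `bond_extra_other`.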
diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs
index 14410339f59a4dc809187feb6d289b16ec52f79c..796b310862afcb8b897d4dafd66395e33e1970fb 100644
--- a/substrate/frame/nomination-pools/src/migration.rs
+++ b/substrate/frame/nomination-pools/src/migration.rs
@@ -17,7 +17,7 @@
 use super::*;
 use crate::log;
-use frame_support::traits::OnRuntimeUpgrade;
+use frame_support::traits::{OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade};
 use sp_std::{collections::btree_map::BTreeMap, vec::Vec};
 
 #[cfg(feature = "try-runtime")]
@@ -70,7 +70,7 @@ pub mod unversioned {
 		fn on_runtime_upgrade() -> Weight {
 			let migrated = BondedPools::<T>::count();
-			// recalcuate the `TotalValueLocked` to compare with the current on-chain TVL which may
+			// recalculate the `TotalValueLocked` to compare with the current on-chain TVL which may
 			// be out of sync.
 			let tvl: BalanceOf<T> = helpers::calculate_tvl_by_total_stake::<T>();
 			let onchain_tvl = TotalValueLocked::<T>::get();
@@ -132,7 +132,7 @@ pub mod v8 {
 	}
 
 	pub struct VersionUncheckedMigrateV7ToV8<T>(sp_std::marker::PhantomData<T>);
-	impl<T: Config> OnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8<T> {
+	impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8<T> {
 		#[cfg(feature = "try-runtime")]
 		fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
 			Ok(Vec::new())
@@ -211,7 +211,7 @@ pub(crate) mod v7 {
 		CountedStorageMap, Twox64Concat, PoolId, V7BondedPoolInner>;
 
 	pub struct VersionUncheckedMigrateV6ToV7<T>(sp_std::marker::PhantomData<T>);
-	impl<T: Config> OnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7<T> {
+	impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV6ToV7<T> {
 		fn on_runtime_upgrade() -> Weight {
 			let migrated = BondedPools::<T>::count();
 			// The TVL should be the sum of all the funds that are actively staked and in the
@@ -282,7 +282,7 @@ mod v6 {
 			})
 		}
 	}
-	impl<T: Config> OnRuntimeUpgrade for MigrateToV6<T> {
+	impl<T: Config> UncheckedOnRuntimeUpgrade for MigrateToV6<T> {
 		fn on_runtime_upgrade() -> Weight {
 			let mut success = 0u64;
 			let mut fail = 0u64;
diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs
index 8fb2b41b88a190df444162a58d0a134936d81272..32b3e9af3cd6da976e9db9062b6689d929a8ef5b 100644
--- a/substrate/frame/nomination-pools/src/tests.rs
+++ b/substrate/frame/nomination-pools/src/tests.rs
@@ -2441,16 +2441,10 @@ mod claim_payout {
 		// given
 		assert_eq!(Currency::free_balance(&10), 35);
 
-		// Permissioned by default
-		assert_noop!(
-			Pools::claim_payout_other(RuntimeOrigin::signed(80), 10),
-			Error::<Runtime>::DoesNotHavePermission
-		);
+		// when
 
-		assert_ok!(Pools::set_claim_permission(
-			RuntimeOrigin::signed(10),
-			ClaimPermission::PermissionlessWithdraw
-		));
+		// NOTE: Claim permission of `PermissionlessWithdraw` allows payout claiming as default,
+		// so a claim permission does not need to be set before non-pool members can claim.
 		assert_ok!(Pools::claim_payout_other(RuntimeOrigin::signed(80), 10));
 
 		// then
@@ -2489,7 +2483,6 @@ mod unbond {
 		);
 
 		// Make permissionless
-		assert_eq!(ClaimPermissions::<Runtime>::get(10), ClaimPermission::Permissioned);
 		assert_ok!(Pools::set_claim_permission(
 			RuntimeOrigin::signed(20),
 			ClaimPermission::PermissionlessAll
@@ -4563,12 +4556,11 @@ mod withdraw_unbonded {
 		CurrentEra::set(1);
 		assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 20);
 
-		assert_ok!(Pools::set_claim_permission(
-			RuntimeOrigin::signed(20),
-			ClaimPermission::PermissionlessAll
-		));
 		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 20));
-		assert_eq!(ClaimPermissions::<Runtime>::get(20), ClaimPermission::PermissionlessAll);
+		assert_eq!(
+			ClaimPermissions::<Runtime>::get(20),
+			ClaimPermission::PermissionlessWithdraw
+		);
 
 		assert_eq!(
 			pool_events_since_last_call(),
@@ -4792,7 +4784,7 @@ mod create {
 }
 
 #[test]
-fn set_claimable_actor_works() {
+fn set_claim_permission_works() {
 	ExtBuilder::default().build_and_execute(|| {
 		// Given
 		Currency::set_balance(&11, ExistentialDeposit::get() + 2);
@@ -4811,22 +4803,19 @@
 			]
 		);
 
-		// Make permissionless
-		assert_eq!(ClaimPermissions::<Runtime>::get(11), ClaimPermission::Permissioned);
+		// Make permissioned
+		assert_eq!(ClaimPermissions::<Runtime>::get(11), ClaimPermission::PermissionlessWithdraw);
 		assert_noop!(
-			Pools::set_claim_permission(
-				RuntimeOrigin::signed(12),
-				ClaimPermission::PermissionlessAll
-			),
+			Pools::set_claim_permission(RuntimeOrigin::signed(12), ClaimPermission::Permissioned),
 			Error::<Runtime>::PoolMemberNotFound
 		);
 		assert_ok!(Pools::set_claim_permission(
 			RuntimeOrigin::signed(11),
-			ClaimPermission::PermissionlessAll
+			ClaimPermission::Permissioned
 		));
 
 		// then
-		assert_eq!(ClaimPermissions::<Runtime>::get(11), ClaimPermission::PermissionlessAll);
+		assert_eq!(ClaimPermissions::<Runtime>::get(11), ClaimPermission::Permissioned);
 	});
 }
@@ -5224,7 +5213,7 @@ mod bond_extra {
 		assert_ok!(Pools::set_claim_permission(
 			RuntimeOrigin::signed(10),
-			ClaimPermission::PermissionlessAll
+			ClaimPermission::PermissionlessCompound
 		));
 		assert_ok!(Pools::bond_extra_other(RuntimeOrigin::signed(50), 10, BondExtra::Rewards));
 		assert_eq!(Currency::free_balance(&default_reward_account()), 7);
diff --git a/substrate/frame/nomination-pools/test-staking/Cargo.toml b/substrate/frame/nomination-pools/test-staking/Cargo.toml
index 9c7b12e4c6345b2080ef4b989171929c505c8a40..130a27752bf37768d34bcbc1012f51524c2df277 100644
--- a/substrate/frame/nomination-pools/test-staking/Cargo.toml
+++ b/substrate/frame/nomination-pools/test-staking/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dev-dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
-scale-info = { version = "2.10.0", features = ["derive"] }
+scale-info = { version = "2.11.1", features = ["derive"] }
 sp-runtime = { path = "../../../primitives/runtime" }
 sp-io = { path = "../../../primitives/io" }
diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml
index b3ef4671ce56f4b66d7e7cb4ce7ec7faec05d3fe..f8efc88bafc1aa3ec4bceb9150d62268499d62b3 100644
--- a/substrate/frame/offences/Cargo.toml
+++ b/substrate/frame/offences/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml
index 8dcce84d257e596609e4062f8a46d684a72b2683..07905a1e0aa42d2aa859046b595569e275fe778a 100644
--- a/substrate/frame/offences/benchmarking/Cargo.toml
+++ b/substrate/frame/offences/benchmarking/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../../benchmarking", default-features = false }
 frame-election-provider-support = { path = "../../election-provider-support", default-features = false }
 frame-support = { path = "../../support", default-features = false }
diff --git a/substrate/frame/offences/src/tests.rs b/substrate/frame/offences/src/tests.rs
index d525c7c3ab1d920e07273f26168d3711517e5827..4897b78f3e4d37ccabaad6a5fa1b3c6acbac45b2 100644
--- a/substrate/frame/offences/src/tests.rs
+++ b/substrate/frame/offences/src/tests.rs
@@ -204,7 +204,7 @@ fn reports_if_an_offence_is_dup() {
 		assert_eq!(Offences::report_offence(vec![], test_offence.clone()), Ok(()));
 
 		// creating a new offence for the same authorities on the next slot
-		// should be considered a new offence and thefore not known
+		// should be considered a new offence and therefore not known
 		let test_offence_next_slot = offence(time_slot + 1, vec![0, 1]);
 		assert!(!>::is_known_offence(
 			&test_offence_next_slot.offenders,
diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml
index 6a2af120f32a914452fd67279a18f8cabe54025b..26f3d7e48ced011697194abe734f87bee84756ee 100644
--- a/substrate/frame/paged-list/Cargo.toml
+++ b/substrate/frame/paged-list/Cargo.toml
@@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
-docify = "0.2.7"
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+docify = "0.2.8"
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/paged-list/src/paged_list.rs b/substrate/frame/paged-list/src/paged_list.rs
index 75467f3ceeb582e04c8e02eb7629637358a760e1..eecc728cd62a1dcd29c4db1d48a642102421be9c 100644
--- a/substrate/frame/paged-list/src/paged_list.rs
+++ b/substrate/frame/paged-list/src/paged_list.rs
@@ -190,7 +190,7 @@ impl Page {
 		let values = sp_io::storage::get(&key)
 			.and_then(|raw| sp_std::vec::Vec::<V>::decode(&mut &raw[..]).ok())?;
 		if values.is_empty() {
-			// Dont create empty pages.
+			// Don't create empty pages.
 			return None
 		}
 		let values = values.into_iter().skip(value_index as usize);
diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml
index 07ebeea52d5298be0db8b00c09ab73641d20080b..2527bdf3a71fa8e9cedc28d163c851bdebd66d73 100644
--- a/substrate/frame/parameters/Cargo.toml
+++ b/substrate/frame/parameters/Cargo.toml
@@ -9,10 +9,10 @@ edition.workspace = true
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["max-encoded-len"] }
-scale-info = { version = "2.1.2", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 paste = { version = "1.0.14", default-features = false }
 serde = { features = ["derive"], optional = true, workspace = true, default-features = true }
-docify = "0.2.5"
+docify = "0.2.8"
 
 frame-support = { path = "../support", default-features = false, features = ["experimental"] }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/parameters/src/lib.rs b/substrate/frame/parameters/src/lib.rs
index 3e54eb6d67f40481b29b4011e257ccfb337c0082..55a6f1ff91ded0222e2596c4ded133a5a5bc0719 100644
--- a/substrate/frame/parameters/src/lib.rs
+++ b/substrate/frame/parameters/src/lib.rs
@@ -122,7 +122,7 @@
 use frame_support::pallet_prelude::*;
 use frame_system::pallet_prelude::*;
 
 use frame_support::traits::{
-	dynamic_params::{AggregratedKeyValue, IntoKey, Key, RuntimeParameterStore, TryIntoKey},
+	dynamic_params::{AggregatedKeyValue, IntoKey, Key, RuntimeParameterStore, TryIntoKey},
 	EnsureOriginWithArg,
 };
@@ -135,10 +135,10 @@
 pub use pallet::*;
 pub use weights::WeightInfo;
 
 /// The key type of a parameter.
-type KeyOf<T> = <<T as Config>::RuntimeParameters as AggregratedKeyValue>::Key;
+type KeyOf<T> = <<T as Config>::RuntimeParameters as AggregatedKeyValue>::Key;
 
 /// The value type of a parameter.
-type ValueOf<T> = <<T as Config>::RuntimeParameters as AggregratedKeyValue>::Value;
+type ValueOf<T> = <<T as Config>::RuntimeParameters as AggregatedKeyValue>::Value;
 
 #[frame_support::pallet]
 pub mod pallet {
@@ -154,7 +154,7 @@ pub mod pallet {
 		///
 		/// Usually created by [`frame_support::dynamic_params`] or equivalent.
 		#[pallet::no_default_bounds]
-		type RuntimeParameters: AggregratedKeyValue;
+		type RuntimeParameters: AggregatedKeyValue;
 
 		/// The origin which may update a parameter.
 		///
@@ -175,11 +175,11 @@ pub mod pallet {
 		/// Is also emitted when the value was not changed.
 		Updated {
 			/// The key that was updated.
-			key: <T::RuntimeParameters as AggregratedKeyValue>::Key,
+			key: <T::RuntimeParameters as AggregatedKeyValue>::Key,
 			/// The old value before this call.
-			old_value: Option<<T::RuntimeParameters as AggregratedKeyValue>::Value>,
+			old_value: Option<<T::RuntimeParameters as AggregatedKeyValue>::Value>,
 			/// The new value after this call.
-			new_value: Option<<T::RuntimeParameters as AggregratedKeyValue>::Value>,
+			new_value: Option<<T::RuntimeParameters as AggregatedKeyValue>::Value>,
 		},
 	}
@@ -245,23 +245,23 @@ pub mod pallet {
 }
 
 impl<T: Config> RuntimeParameterStore for Pallet<T> {
-	type AggregratedKeyValue = T::RuntimeParameters;
+	type AggregatedKeyValue = T::RuntimeParameters;
 
 	fn get<KV, K>(key: K) -> Option<K::Value>
 	where
-		KV: AggregratedKeyValue,
-		K: Key + Into<<KV as AggregratedKeyValue>::Key>,
-		<KV as AggregratedKeyValue>::Key: IntoKey<
-			<<Self as RuntimeParameterStore>::AggregratedKeyValue as AggregratedKeyValue>::Key,
+		KV: AggregatedKeyValue,
+		K: Key + Into<<KV as AggregatedKeyValue>::Key>,
+		<KV as AggregatedKeyValue>::Key: IntoKey<
+			<<Self as RuntimeParameterStore>::AggregatedKeyValue as AggregatedKeyValue>::Key,
 		>,
-		<<Self as RuntimeParameterStore>::AggregratedKeyValue as AggregratedKeyValue>::Value:
-			TryIntoKey<<KV as AggregratedKeyValue>::Value>,
-		<KV as AggregratedKeyValue>::Value: TryInto<K::WrappedValue>,
+		<<Self as RuntimeParameterStore>::AggregatedKeyValue as AggregatedKeyValue>::Value:
+			TryIntoKey<<KV as AggregatedKeyValue>::Value>,
+		<KV as AggregatedKeyValue>::Value: TryInto<K::WrappedValue>,
 	{
-		let key: <KV as AggregratedKeyValue>::Key = key.into();
+		let key: <KV as AggregatedKeyValue>::Key = key.into();
 		let val = Parameters::<T>::get(key.into_key());
 		val.and_then(|v| {
-			let val: <KV as AggregratedKeyValue>::Value = v.try_into_key().ok()?;
+			let val: <KV as AggregatedKeyValue>::Value = v.try_into_key().ok()?;
 			let val: K::WrappedValue = val.try_into().ok()?;
 			let val = val.into();
 			Some(val)
diff --git a/substrate/frame/parameters/src/tests/unit.rs b/substrate/frame/parameters/src/tests/unit.rs
index d3f11ba96403514fbe77ba89f57872e85cbf655a..d811a83564657d75759f652a849402f2b912422f 100644
--- a/substrate/frame/parameters/src/tests/unit.rs
+++ b/substrate/frame/parameters/src/tests/unit.rs
@@ -25,7 +25,7 @@ use crate::tests::mock::{
 	RuntimeParametersValue,
 };
 use codec::Encode;
-use frame_support::{assert_noop, assert_ok, traits::dynamic_params::AggregratedKeyValue};
+use frame_support::{assert_noop, assert_ok, traits::dynamic_params::AggregatedKeyValue};
 use sp_core::Get;
 use sp_runtime::DispatchError;
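The `AggregratedKeyValue` -> `AggregatedKeyValue` change above is a mechanical rename, but the shape of the trait it names is worth seeing once. A rough standalone sketch of the aggregated key/value pattern, heavily simplified relative to the real `frame_support::traits::dynamic_params` API (the names `Key`, `Value` and `ParameterStore` below are illustrative, not the crate's types):

    use std::collections::BTreeMap;

    // One aggregated key type and one aggregated value type cover every
    // dynamic parameter, so heterogeneous parameters can live in a single map.
    #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
    enum Key { InflationRate, ByteDeposit }

    #[derive(Clone, Debug, PartialEq)]
    enum Value { Perbill(u32), Balance(u128) }

    // Stand-in for the pallet's `Parameters` storage map.
    struct ParameterStore { map: BTreeMap<Key, Value> }

    impl ParameterStore {
        fn get(&self, key: &Key) -> Option<&Value> { self.map.get(key) }
        fn set(&mut self, key: Key, value: Value) { self.map.insert(key, value); }
    }

    fn main() {
        let mut store = ParameterStore { map: BTreeMap::new() };
        store.set(Key::ByteDeposit, Value::Balance(1_000));
        assert_eq!(store.get(&Key::ByteDeposit), Some(&Value::Balance(1_000)));
    }

The typed `Key`/`WrappedValue` machinery in the real trait exists to convert between each concrete parameter type and these aggregated enums without losing type safety.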
diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml
index 10a15f97bd5a72417243b86c81cc7b3ddf1db2a6..d67fc7bead04c4dc01aca6494d823c786d69715c 100644
--- a/substrate/frame/preimage/Cargo.toml
+++ b/substrate/frame/preimage/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml
index 17930079afd226da977d4796f4bb29ef690f9d3b..0a3b39e471d429f6bc5cfdec203aa085d8f6144e 100644
--- a/substrate/frame/proxy/Cargo.toml
+++ b/substrate/frame/proxy/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs
index 4d4da0433afa015a6c33e8ee68e1e1c90851ce97..2b3fac5f59e4b1fd48f92bffc314cdbad5ec9c22 100644
--- a/substrate/frame/proxy/src/lib.rs
+++ b/substrate/frame/proxy/src/lib.rs
@@ -119,7 +119,7 @@ pub mod pallet {
 		/// The currency mechanism.
 		type Currency: ReservableCurrency<Self::AccountId>;
 
-		/// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` fitler.
+		/// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` filter.
 		/// The instance filter determines whether a given call may be proxied under this type.
 		///
 		/// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value.
diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml
index 54e84c0b55887d55017f8c16447424a594185f10..0a6595807750388d3a99016276f69d57772bbba7 100644
--- a/substrate/frame/ranked-collective/Cargo.toml
+++ b/substrate/frame/ranked-collective/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml
index 421c799513731d06daf9ae704a940819443405bd..43608de37fc3c08ecb9a266388c2b2b8b07d0255 100644
--- a/substrate/frame/recovery/Cargo.toml
+++ b/substrate/frame/recovery/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml
index 2dfb25a2fd3a5b50bdc7828e50c8e247cd7f936a..f4e0171443aa5181e13866edf07a98e6d1b0bf8e 100644
--- a/substrate/frame/referenda/Cargo.toml
+++ b/substrate/frame/referenda/Cargo.toml
@@ -20,7 +20,7 @@
 assert_matches = { version = "1.5", optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
+scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { features = ["derive"], optional = true, workspace = true, default-features = true }
 sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs
index e616056c3022abe796cd494e91fa4e9565843e60..fbe27e1a4784733f517bb6ee3259ebdc4b1075b3 100644
--- a/substrate/frame/referenda/src/lib.rs
+++ b/substrate/frame/referenda/src/lib.rs
@@ -424,6 +424,8 @@ pub mod pallet {
 		BadStatus,
 		/// The preimage does not exist.
 		PreimageNotExist,
+		/// The preimage is stored with a different length than the one provided.
+		PreimageStoredWithDifferentLength,
 	}
 
 	#[pallet::hooks]
@@ -462,6 +464,16 @@ pub mod pallet {
 			let proposal_origin = *proposal_origin;
 			let who = T::SubmitOrigin::ensure_origin(origin, &proposal_origin)?;
 
+			// If the pre-image is already stored, ensure that it has the same length as given in
+			// `proposal`.
+			if let (Some(preimage_len), Some(proposal_len)) =
+				(proposal.lookup_hash().and_then(|h| T::Preimages::len(&h)), proposal.lookup_len())
+			{
+				if preimage_len != proposal_len {
+					return Err(Error::<T, I>::PreimageStoredWithDifferentLength.into())
+				}
+			}
+
 			let track = T::Tracks::track_for(&proposal_origin).map_err(|_| Error::<T, I>::NoTrack)?;
 			let submission_deposit = Self::take_deposit(who, T::SubmissionDeposit::get())?;
diff --git a/substrate/frame/referenda/src/tests.rs b/substrate/frame/referenda/src/tests.rs
index 8f51136de0bfdb48df1a43e47dd0de63f7a0e21d..52251fcbdbeed0ec5b8830e87a2a4c66bb265e39 100644
--- a/substrate/frame/referenda/src/tests.rs
+++ b/substrate/frame/referenda/src/tests.rs
@@ -666,3 +666,19 @@ fn clear_metadata_works() {
 		}));
 	});
 }
+
+#[test]
+fn detects_incorrect_len() {
+	ExtBuilder::default().build_and_execute(|| {
+		let hash = note_preimage(1);
+		assert_noop!(
+			Referenda::submit(
+				RuntimeOrigin::signed(1),
+				Box::new(RawOrigin::Root.into()),
+				frame_support::traits::Bounded::Lookup { hash, len: 3 },
+				DispatchTime::At(1),
+			),
+			Error::<Test>::PreimageStoredWithDifferentLength
+		);
+	});
+}
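The referenda change above compares the caller-supplied length of a `Lookup` proposal with the length of the preimage already noted on chain, and rejects the submission when they disagree. A standalone sketch of that consistency check, with simplified stand-ins for `Bounded::Lookup` and the preimage store (the names `LookupProposal` and `check_len` are inventions for illustration):

    use std::collections::HashMap;

    type Hash = [u8; 32];

    // Simplified stand-in for `Bounded::Lookup { hash, len }`.
    struct LookupProposal { hash: Hash, len: u32 }

    #[derive(Debug, PartialEq)]
    enum SubmitError { PreimageStoredWithDifferentLength }

    // `store` stands in for the preimage pallet: hash -> noted preimage bytes.
    fn check_len(
        store: &HashMap<Hash, Vec<u8>>,
        proposal: &LookupProposal,
    ) -> Result<(), SubmitError> {
        // Only enforceable when the preimage is already noted; otherwise the
        // claimed length cannot be checked yet and submission proceeds.
        if let Some(preimage) = store.get(&proposal.hash) {
            if preimage.len() as u32 != proposal.len {
                return Err(SubmitError::PreimageStoredWithDifferentLength);
            }
        }
        Ok(())
    }

    fn main() {
        let mut store = HashMap::new();
        let hash = [0u8; 32];
        store.insert(hash, vec![1, 2, 3, 4, 5]);
        assert!(check_len(&store, &LookupProposal { hash, len: 5 }).is_ok());
        assert_eq!(
            check_len(&store, &LookupProposal { hash, len: 3 }),
            Err(SubmitError::PreimageStoredWithDifferentLength),
        );
    }

This matters because the stated length feeds weight accounting; the new `detects_incorrect_len` test pins the behaviour by noting a preimage and then submitting with a deliberately wrong `len`.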
"3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } diff --git a/substrate/frame/root-testing/src/lib.rs b/substrate/frame/root-testing/src/lib.rs index 51fd835409aef4bfe58f8f97c4df88e130c965a5..98e1f5c5b668d04f7638087bda9be9d81bc12484 100644 --- a/substrate/frame/root-testing/src/lib.rs +++ b/substrate/frame/root-testing/src/lib.rs @@ -17,7 +17,7 @@ //! # Root Testing Pallet //! -//! Pallet that contains extrinsics that can be usefull in testing. +//! Pallet that contains extrinsics that can be useful in testing. //! //! NOTE: This pallet should only be used for testing purposes and should not be used in production //! runtimes! diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index 0c59740bef30a8169490ddd27be7898b975c82ff..b6b7e5a67e481a75914207410afe64a8c5b38280 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -docify = "0.2.7" +docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index ba57fd46eebb44f0164ffd8aa2a5edf936ab3835..8c77edcb173a03ed4ed8a8c039c3e670d4bf02ce 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs index 26afb2d8abafee0bbb816fcf365164afcb71495f..124ab38c5651b1e3c57d0457952eda8cd5b92e98 100644 --- a/substrate/frame/salary/src/tests/integration.rs +++ b/substrate/frame/salary/src/tests/integration.rs @@ -250,7 +250,7 @@ fn swap_exhaustive_works() { }); assert_eq!(root_add, root_swap); - // Ensure that we dont compare trivial stuff like `()` from a type error above. 
+ // Ensure that we don't compare trivial stuff like `()` from a type error above. assert_eq!(root_add.len(), 32); }); } diff --git a/substrate/frame/salary/src/tests/unit.rs b/substrate/frame/salary/src/tests/unit.rs index 5e30835411603c6f68e6c5bb600b6d831688a6a0..db1c8b947ef57c7c1919568f5bf0545f0449994d 100644 --- a/substrate/frame/salary/src/tests/unit.rs +++ b/substrate/frame/salary/src/tests/unit.rs @@ -508,7 +508,7 @@ fn zero_payment_fails() { } #[test] -fn unregistered_bankrupcy_fails_gracefully() { +fn unregistered_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -532,7 +532,7 @@ fn unregistered_bankrupcy_fails_gracefully() { } #[test] -fn registered_bankrupcy_fails_gracefully() { +fn registered_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -561,7 +561,7 @@ fn registered_bankrupcy_fails_gracefully() { } #[test] -fn mixed_bankrupcy_fails_gracefully() { +fn mixed_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); @@ -589,7 +589,7 @@ fn mixed_bankrupcy_fails_gracefully() { } #[test] -fn other_mixed_bankrupcy_fails_gracefully() { +fn other_mixed_bankruptcy_fails_gracefully() { new_test_ext().execute_with(|| { assert_ok!(Salary::init(RuntimeOrigin::signed(1))); set_rank(1, 2); diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 325a39bf59799f6b314d54ad131b892eec61ec44..09977142efc89a95b982ddef69be8d7552e5eee7 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/sassafras/src/benchmarking.rs b/substrate/frame/sassafras/src/benchmarking.rs index 1c9626ad260c80c6c242a9d3733c11bb9641366e..2b2467c6f84ddf431425605834a7f6b006795a95 100644 --- a/substrate/frame/sassafras/src/benchmarking.rs +++ b/substrate/frame/sassafras/src/benchmarking.rs @@ -84,7 +84,7 @@ mod benchmarks { // - load the full ring context. // - recompute the ring verifier. // - sorting the epoch tickets in one shot - // (here we account for the very unluky scenario where we haven't done any sort work yet) + // (here we account for the very unlucky scenario where we haven't done any sort work yet) // - pending epoch change config. // // For this bench we assume a redundancy factor of 2 (suggested value to be used in prod). diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 0ee8657489b7f7321d24b209dfddcd086cf1cf37..8cbf1e47e3203a208578abb1b8b8431cbfd22ebb 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -234,7 +234,7 @@ pub mod pallet { /// Epoch X first N-th ticket has key (X mod 2, N) /// /// Note that the ticket's index doesn't directly correspond to the slot index within the epoch. 
- /// The assigment is computed dynamically using an *outside-in* strategy. + /// The assignment is computed dynamically using an *outside-in* strategy. /// /// Be aware that entries within this map are never removed, only overwritten. /// Last element index should be fetched from the [`TicketsMeta`] value. @@ -465,7 +465,7 @@ pub mod pallet { /// Plan an epoch configuration change. /// - /// The epoch configuration change is recorded and will be announced at the begining + /// The epoch configuration change is recorded and will be announced at the beginning /// of the next epoch together with next epoch authorities information. /// In other words, the configuration will be enacted one epoch later. /// @@ -758,11 +758,11 @@ impl Pallet { let randomness = hashing::blake2_256(buf.as_slice()); RandomnessAccumulator::::put(randomness); - let next_randoness = Self::update_epoch_randomness(1); + let next_randomness = Self::update_epoch_randomness(1); // Deposit a log as this is the first block in first epoch. let next_epoch = NextEpochDescriptor { - randomness: next_randoness, + randomness: next_randomness, authorities: Self::next_authorities().into_inner(), config: None, }; diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index f50f6afdc06379f0ffaca3460df314e1eb0e4945..40a71736447076d10fd6d8f668a019581f5911f3 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -15,7 +15,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } @@ -23,7 +23,7 @@ sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } sp-weights = { path = "../../primitives/weights", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] pallet-preimage = { path = "../preimage" } diff --git a/substrate/frame/scheduler/README.md b/substrate/frame/scheduler/README.md index 6aec2ddb0e4325e8ec4bbe3987ad2ee16a12cd52..5e233fdbdb0e9babd08dd73c8ca4942c718c4a58 100644 --- a/substrate/frame/scheduler/README.md +++ b/substrate/frame/scheduler/README.md @@ -16,7 +16,7 @@ for the origin: namely `frame_system::Config::BaseCallFilter` for all origin except root which will get no filter. And not the filter contained in origin use to call `fn schedule`. -If a call is scheduled using proxy or whatever mecanism which adds filter, +If a call is scheduled using proxy or whatever mechanism which adds filter, then those filter will not be used when dispatching the schedule call. ## Interface diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index 18441d54b39a2244f1eb9677d38be2576e141d94..884f78000384cf66696b22de40238bbd2e5bef95 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -211,7 +211,7 @@ benchmarks! 
{ } verify { } - // `execute_dispatch` when the origin is `Signed`, not counting the dispatable's weight. + // `execute_dispatch` when the origin is `Signed`, not counting the dispatchable's weight. execute_dispatch_signed { let mut counter = WeightMeter::new(); let origin = make_origin::(true); @@ -222,7 +222,7 @@ benchmarks! { verify { } - // `execute_dispatch` when the origin is not `Signed`, not counting the dispatable's weight. + // `execute_dispatch` when the origin is not `Signed`, not counting the dispatchable's weight. execute_dispatch_unsigned { let mut counter = WeightMeter::new(); let origin = make_origin::(false); diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index af3abd8ac4f2a81b363c4553f3652a2cba972212..d19a1e0001dd3aed98369b8691187b5cf8dcdfc7 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -46,7 +46,7 @@ //! 1. Scheduling a runtime call at a specific block. #![doc = docify::embed!("src/tests.rs", basic_scheduling_works)] //! -//! 2. Scheduling a preimage hash of a runtime call at a specifc block +//! 2. Scheduling a preimage hash of a runtime call at a specific block #![doc = docify::embed!("src/tests.rs", scheduling_with_preimages_works)] //! @@ -1267,6 +1267,17 @@ impl Pallet { id: task.maybe_id, }); + // It was not available when we needed it, so we don't need to have requested it + // anymore. + T::Preimages::drop(&task.call); + + // We don't know why `peek` failed, thus we most account here for the "full weight". + let _ = weight.try_consume(T::WeightInfo::service_task( + task.call.lookup_len().map(|x| x as usize), + task.maybe_id.is_some(), + task.maybe_periodic.is_some(), + )); + return Err((Unavailable, Some(task))) }, }; @@ -1435,6 +1446,7 @@ impl Pallet { } } +#[allow(deprecated)] impl schedule::v2::Anon, ::RuntimeCall, T::PalletsOrigin> for Pallet { @@ -1469,6 +1481,8 @@ impl schedule::v2::Anon, ::RuntimeCall } } +// TODO: migrate `schedule::v2::Anon` to `v3` +#[allow(deprecated)] impl schedule::v2::Named, ::RuntimeCall, T::PalletsOrigin> for Pallet { diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index 1ed2ca9e2f36d0b3bf4b3a02983ab1ab7539c30b..3023a370a4b6034e4ec9366fcaa065035edccec3 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -1119,7 +1119,7 @@ fn reschedule_named_works() { } #[test] -fn reschedule_named_perodic_works() { +fn reschedule_named_periodic_works() { new_test_ext().execute_with(|| { let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); @@ -1501,8 +1501,11 @@ fn scheduler_handles_periodic_unavailable_preimage() { run_to_block(4); assert_eq!(logger::log().len(), 1); - // Unnote the preimage + // As the public api doesn't support to remove a noted preimage, we need to first unnote it + // and then request it again. Basically this should not happen in real life (whatever you + // call real life;). Preimage::unnote(&hash); + Preimage::request(&hash); // Does not ever execute again. run_to_block(100); @@ -2285,9 +2288,18 @@ fn postponed_named_task_cannot_be_rescheduled() { // Run to a very large block. run_to_block(10); + // It was not executed. assert!(logger::log().is_empty()); - assert!(Preimage::is_requested(&hash)); + + // Preimage was not available + assert_eq!( + System::events().last().unwrap().event, + crate::Event::CallUnavailable { task: (4, 0), id: Some(name) }.into() + ); + + // So it should not be requested. 
+ assert!(!Preimage::is_requested(&hash)); // Postponing removes the lookup. assert!(!Lookup::::contains_key(name)); @@ -2307,11 +2319,12 @@ fn postponed_named_task_cannot_be_rescheduled() { ); // Finally add the preimage. - assert_ok!(Preimage::note(call.encode().into())); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); + run_to_block(1000); // It did not execute. assert!(logger::log().is_empty()); - assert!(Preimage::is_requested(&hash)); + assert!(!Preimage::is_requested(&hash)); // Manually re-schedule the call by name does not work. assert_err!( @@ -2980,7 +2993,7 @@ fn reschedule_named_last_task_removes_agenda() { }); } -/// Ensures that an unvailable call sends an event. +/// Ensures that an unavailable call sends an event. #[test] fn unavailable_call_is_detected() { use frame_support::traits::schedule::v3::Named; @@ -3008,6 +3021,8 @@ fn unavailable_call_is_detected() { // Ensure the preimage isn't available assert!(!Preimage::have(&bound)); + // But we have requested it + assert!(Preimage::is_requested(&hash)); // Executes in block 4. run_to_block(4); @@ -3016,5 +3031,7 @@ fn unavailable_call_is_detected() { System::events().last().unwrap().event, crate::Event::CallUnavailable { task: (4, 0), id: Some(name) }.into() ); + // It should not be requested anymore. + assert!(!Preimage::is_requested(&hash)); }); } diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index ae6ade5189d0f51b683f35f846c5391a569b163a..92b70e01b9ab7e37d0274b434b357eb654773e83 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index de041307f7022678235d98372bf7119eb4d48eaa..86814f8276e7bed8625f726b17d5ea18c6cf604f 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-timestamp = { path = "../timestamp", default-features = false } diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index 291fda3c4c7a4bfb56224593778288b7557daa95..a00fbd8f6fdf5452b1e225332efaac7f0d09adfb 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -29,7 +29,7 @@ sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] codec = { package = "parity-scale-codec", 
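The scheduler change above alters what happens when a scheduled call's preimage turns out to be unavailable at dispatch time: the scheduler now drops its preimage request (so the hash no longer shows as requested, as the updated tests assert) and conservatively charges the full servicing weight, since the reason `peek` failed is unknown. A standalone sketch of that control flow, with invented simplified types (`WeightMeter`, `Preimages`, `service_task`) standing in for the pallet's machinery:

    // Sketch of the new unavailable-preimage path in the scheduler's task
    // servicing loop. Weights are plain u64s here for brevity.
    struct WeightMeter { consumed: u64, limit: u64 }

    impl WeightMeter {
        fn try_consume(&mut self, w: u64) -> Result<(), ()> {
            if self.consumed + w > self.limit { return Err(()); }
            self.consumed += w;
            Ok(())
        }
    }

    struct Preimages { requested: bool }
    impl Preimages {
        fn peek(&self) -> Option<Vec<u8>> { None } // simulate a missing preimage
        fn drop_request(&mut self) { self.requested = false; }
    }

    fn service_task(preimages: &mut Preimages, meter: &mut WeightMeter) -> Result<(), &'static str> {
        match preimages.peek() {
            Some(_call) => Ok(()), // would dispatch the call here
            None => {
                // The call was unavailable when needed, so stop requesting it.
                preimages.drop_request();
                // We don't know why `peek` failed, so account for the full weight.
                let _ = meter.try_consume(10);
                Err("Unavailable")
            },
        }
    }

    fn main() {
        let mut p = Preimages { requested: true };
        let mut m = WeightMeter { consumed: 0, limit: 100 };
        assert_eq!(service_task(&mut p, &mut m), Err("Unavailable"));
        assert!(!p.requested);
        assert_eq!(m.consumed, 10);
    }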
version = "3.6.1", features = ["derive"] } -scale-info = "2.10.0" +scale-info = "2.11.1" frame-election-provider-support = { path = "../../election-provider-support" } pallet-balances = { path = "../../balances" } pallet-staking-reward-curve = { path = "../../staking/reward-curve" } diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index 3dab082b3954b7bc1f5e20463c6df5192305550b..3d99ddba392bcee045d122e6b898552d0f5e9b75 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } rand_chacha = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false } diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs index 99dd580a4035ed138b494d827956435e597ebe1c..5bce245f73f17e02d1301ee9242a356ad291b3ee 100644 --- a/substrate/frame/society/src/lib.rs +++ b/substrate/frame/society/src/lib.rs @@ -1509,7 +1509,7 @@ impl, I: 'static> Pallet { // the Founder. if MemberCount::::get() > 2 { let defender = next_defender - .or_else(|| Self::pick_defendent(rng)) + .or_else(|| Self::pick_defendant(rng)) .expect("exited if members empty; qed"); let skeptic = Self::pick_member_except(rng, &defender).expect("exited if members empty; qed"); @@ -1871,7 +1871,7 @@ impl, I: 'static> Pallet { /// /// If only the Founder and Head members exist (or the state is inconsistent), then `None` /// may be returned. - fn pick_defendent(rng: &mut impl RngCore) -> Option { + fn pick_defendant(rng: &mut impl RngCore) -> Option { let member_count = MemberCount::::get(); if member_count <= 2 { return None @@ -1976,7 +1976,7 @@ impl, I: 'static> Pallet { /// Transfer some `amount` from the main account into the payouts account and reduce the Pot /// by this amount. fn reserve_payout(amount: BalanceOf) { - // Tramsfer payout from the Pot into the payouts account. + // Transfer payout from the Pot into the payouts account. Pot::::mutate(|pot| pot.saturating_reduce(amount)); // this should never fail since we ensure we can afford the payouts in a previous @@ -1988,7 +1988,7 @@ impl, I: 'static> Pallet { /// Transfer some `amount` from the main account into the payouts account and increase the Pot /// by this amount. fn unreserve_payout(amount: BalanceOf) { - // Tramsfer payout from the Pot into the payouts account. + // Transfer payout from the Pot into the payouts account. 
Pot::::mutate(|pot| pot.saturating_accrue(amount)); // this should never fail since we ensure we can afford the payouts in a previous diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index 6a1029114519db8c41c56301197d04de26bfb6c8..7ded1f84f5823696e922a3ef98e4cce45191d21c 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -19,7 +19,7 @@ use super::*; use codec::{Decode, Encode}; -use frame_support::traits::{Defensive, DefensiveOption, Instance, OnRuntimeUpgrade}; +use frame_support::traits::{Defensive, DefensiveOption, Instance, UncheckedOnRuntimeUpgrade}; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; @@ -36,7 +36,7 @@ impl< T: Config, I: Instance + 'static, PastPayouts: Get::AccountId, BalanceOf)>>, - > OnRuntimeUpgrade for VersionUncheckedMigrateToV2 + > UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV2 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { @@ -240,7 +240,7 @@ pub fn assert_internal_consistency, I: Instance + 'static>() { pub fn from_original, I: Instance + 'static>( past_payouts: &mut [(::AccountId, BalanceOf)], ) -> Result { - // Migrate Bids from old::Bids (just a trunctation). + // Migrate Bids from old::Bids (just a truncation). Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(v0::Bids::::take())); // Initialise round counter. @@ -287,13 +287,13 @@ pub fn from_original, I: Instance + 'static>( .defensive_ok_or("member_count > 0, we must have at least 1 member")?; // Swap the founder with the first member in MemberByIndex. MemberByIndex::::swap(0, member_count); - // Update the indicies of the swapped member MemberRecords. + // Update the indices of the swapped member MemberRecords. Members::::mutate(&member, |m| { if let Some(member) = m { member.index = 0; } else { frame_support::defensive!( - "Member somehow disapeared from storage after it was inserted" + "Member somehow disappeared from storage after it was inserted" ); } }); @@ -302,7 +302,7 @@ pub fn from_original, I: Instance + 'static>( member.index = member_count; } else { frame_support::defensive!( - "Member somehow disapeared from storage after it was queried" + "Member somehow disappeared from storage after it was queried" ); } }); diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 411567e1dedfde46450c0e778bcb14bb00883962..5f8ecc9a7c52b5095b561db1fa93c749a8efd206 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -541,7 +541,7 @@ fn suspended_candidate_rejected_works() { assert_ok!(Society::vote(Origin::signed(30), x, true)); } - // Voting continues, as no canidate is clearly accepted yet and the founder chooses not to + // Voting continues, as no candidate is clearly accepted yet and the founder chooses not to // act. 
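Several migrations in this diff (society above, nomination-pools earlier) switch their inner migration types from `OnRuntimeUpgrade` to `UncheckedOnRuntimeUpgrade`. The idea, as I read it, is that version-unchecked migrations should only ever run through a wrapper that owns the `StorageVersion` check and bump. A minimal standalone sketch of that layering under those assumptions (this is not the real `frame_support` API; `Storage`, `run_versioned` and the u64 weights are simplifications):

    // Inner migrations implement the "unchecked" trait: no version guard of
    // their own, so they cannot be run safely in isolation.
    struct Storage { version: u16 }

    trait UncheckedOnRuntimeUpgrade {
        fn on_runtime_upgrade(storage: &mut Storage) -> u64;
    }

    // Stand-in for the versioned wrapper: it alone checks and bumps the version.
    fn run_versioned<Inner: UncheckedOnRuntimeUpgrade>(
        from: u16,
        to: u16,
        storage: &mut Storage,
    ) -> u64 {
        if storage.version != from {
            return 1; // one storage read, migration skipped
        }
        let weight = Inner::on_runtime_upgrade(storage);
        storage.version = to;
        weight + 2 // plus the version read and write
    }

    struct MigrateV1ToV2;
    impl UncheckedOnRuntimeUpgrade for MigrateV1ToV2 {
        fn on_runtime_upgrade(_storage: &mut Storage) -> u64 {
            // transform storage items here
            5
        }
    }

    fn main() {
        let mut storage = Storage { version: 1 };
        assert_eq!(run_versioned::<MigrateV1ToV2>(1, 2, &mut storage), 7);
        // A second run is a cheap no-op because the version already advanced.
        assert_eq!(run_versioned::<MigrateV1ToV2>(1, 2, &mut storage), 1);
    }

Making the inner type fail to implement the checked trait turns "accidentally ran an unguarded migration" into a compile error rather than a runtime hazard.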
conclude_intake(false, None); assert_eq!(members(), vec![10, 20, 30]); diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index d2a46146931b8863ae347277db8076850dc76d68..4fd0cedbae63fb3c484549c2f4388f912e7a428d 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -20,7 +20,7 @@ serde = { features = ["alloc", "derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } @@ -40,10 +40,10 @@ frame-benchmarking = { path = "../benchmarking", default-features = false, optio rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] +pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } sp-core = { path = "../../primitives/core" } sp-npos-elections = { path = "../../primitives/npos-elections" } -pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } pallet-staking-reward-curve = { path = "reward-curve" } pallet-bags-list = { path = "../bags-list" } diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index a83060873973cbaf568fc729535ce514cd90fcd6..0b67cd46039515970293b16846bbe26f5812bee3 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -953,6 +953,15 @@ benchmarks! { assert_eq!(MinCommission::::get(), Perbill::from_percent(100)); } + restore_ledger { + let (stash, controller) = create_stash_controller::(0, 100, RewardDestination::Staked)?; + // corrupt ledger. + Ledger::::remove(controller); + }: _(RawOrigin::Root, stash.clone(), None, None, None) + verify { + assert_eq!(Staking::::inspect_bond_state(&stash), Ok(LedgerIntegrityState::Ok)); + } + impl_benchmark_test_suite!( Staking, crate::mock::ExtBuilder::default().has_stakers(true), diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index 67be1e15bc6e7d46b0335c009ff4ac15ee79a834..9461daefed65e74c778a248a0bbaeeedd3ee6222 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -130,7 +130,7 @@ impl StakingLedger { // if ledger bond is in a bad state, return error to prevent applying operations that may // further spoil the ledger's state. A bond is in bad state when the bonded controller is - // associted with a different ledger (i.e. a ledger with a different stash). + // associated with a different ledger (i.e. a ledger with a different stash). // // See for more details. ensure!( diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 5a92b6c855f2d044e671d714d612ad1753ee3e28..f5b7e3eca3de7cd49b6a5cbdef37dfa4feff80ec 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -491,6 +491,20 @@ pub struct StakingLedger { controller: Option, } +/// State of a ledger with regards with its data and metadata integrity. 
+#[derive(PartialEq, Debug)] +enum LedgerIntegrityState { + /// Ledger, bond and corresponding staking lock are OK. + Ok, + /// Ledger and/or bond is corrupted. This means that the bond has a ledger with a different + /// stash than the bonded stash. + Corrupted, + /// Ledger was corrupted and it has been killed. + CorruptedKilled, + /// Ledger and bond are OK; however, the ledger's stash lock is out of sync. + LockCorrupted, +} + impl StakingLedger { /// Remove entries from `unlocking` that are sufficiently old and reduce the /// total by the sum of their balances. diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 3e3b1113a8198ba4d1718cf22f36c3568057c0ff..d5b18421d5b67fbeaac27cbbdecde174fa3d024b 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -30,7 +30,7 @@ use frame_support::ensure; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; -/// Used for release versioning upto v12. +/// Used for release versioning up to v12. /// /// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 6c2ea225ff1e116c2621d6c6a9bed01eb2ea04b2..6db462c1a70fbce4ed2d531bf9ad4d837e83b585 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -25,8 +25,8 @@ use frame_election_provider_support::{ use frame_support::{ assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ - ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, OnUnbalanced, - OneSessionHandler, + ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, LockableCurrency, + OnUnbalanced, OneSessionHandler, WithdrawReasons, }, weights::constants::RocksDbWeight, }; @@ -786,55 +786,86 @@ pub(crate) fn bond_controller_stash(controller: AccountId, stash: AccountId) -> Ok(()) } +// simulates `set_controller` without corrupted ledger checks for testing purposes. +pub(crate) fn set_controller_no_checks(stash: &AccountId) { + let controller = Bonded::::get(stash).expect("testing stash should be bonded"); + let ledger = Ledger::::get(&controller).expect("testing ledger should exist"); + + Ledger::::remove(&controller); + Ledger::::insert(stash, ledger); + Bonded::::insert(stash, stash); +} + +// simulates `bond_extra` without corrupted ledger checks for testing purposes.
+pub(crate) fn bond_extra_no_checks(stash: &AccountId, amount: Balance) { + let controller = Bonded::::get(stash).expect("bond must exist to bond_extra"); + let mut ledger = Ledger::::get(&controller).expect("ledger must exist to bond_extra"); + + let new_total = ledger.total + amount; + Balances::set_lock(crate::STAKING_ID, stash, new_total, WithdrawReasons::all()); + ledger.total = new_total; + ledger.active = new_total; + Ledger::::insert(controller, ledger); +} + pub(crate) fn setup_double_bonded_ledgers() { - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 10, RewardDestination::Staked)); - assert_ok!(Staking::bond(RuntimeOrigin::signed(2), 20, RewardDestination::Staked)); - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 20, RewardDestination::Staked)); + let init_ledgers = Ledger::::iter().count(); + + let _ = Balances::make_free_balance_be(&333, 2000); + let _ = Balances::make_free_balance_be(&444, 2000); + let _ = Balances::make_free_balance_be(&555, 2000); + let _ = Balances::make_free_balance_be(&777, 2000); + + assert_ok!(Staking::bond(RuntimeOrigin::signed(333), 10, RewardDestination::Staked)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(444), 20, RewardDestination::Staked)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(555), 20, RewardDestination::Staked)); // not relevant to the test case, but ensures try-runtime checks pass. - [1, 2, 3] + [333, 444, 555] .iter() .for_each(|s| Payee::::insert(s, RewardDestination::Staked)); // we want to test the case where a controller can also be a stash of another ledger. // for that, we change the controller/stash bonding so that: - // * 2 becomes controller of 1. - // * 3 becomes controller of 2. - // * 4 becomes controller of 3. - let ledger_1 = Ledger::::get(1).unwrap(); - let ledger_2 = Ledger::::get(2).unwrap(); - let ledger_3 = Ledger::::get(3).unwrap(); - - // 4 becomes controller of 3. - Bonded::::mutate(3, |controller| *controller = Some(4)); - Ledger::::insert(4, ledger_3); - - // 3 becomes controller of 2. - Bonded::::mutate(2, |controller| *controller = Some(3)); - Ledger::::insert(3, ledger_2); - - // 2 becomes controller of 1 - Bonded::::mutate(1, |controller| *controller = Some(2)); - Ledger::::insert(2, ledger_1); - // 1 is not controller anymore. - Ledger::::remove(1); + // * 444 becomes controller of 333. + // * 555 becomes controller of 444. + // * 777 becomes controller of 555. + let ledger_333 = Ledger::::get(333).unwrap(); + let ledger_444 = Ledger::::get(444).unwrap(); + let ledger_555 = Ledger::::get(555).unwrap(); + + // 777 becomes controller of 555. + Bonded::::mutate(555, |controller| *controller = Some(777)); + Ledger::::insert(777, ledger_555); + + // 555 becomes controller of 444. + Bonded::::mutate(444, |controller| *controller = Some(555)); + Ledger::::insert(555, ledger_444); + + // 444 becomes controller of 333. + Bonded::::mutate(333, |controller| *controller = Some(444)); + Ledger::::insert(444, ledger_333); + + // 333 is not controller anymore. + Ledger::::remove(333); // checks. now we have: - // * 3 ledgers - assert_eq!(Ledger::::iter().count(), 3); - // * stash 1 has controller 2. - assert_eq!(Bonded::::get(1), Some(2)); - assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(1)), Some(2)); - assert_eq!(Ledger::::get(2).unwrap().stash, 1); - - // * stash 2 has controller 3. 
- assert_eq!(Bonded::::get(2), Some(3)); - assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(2)), Some(3)); - assert_eq!(Ledger::::get(3).unwrap().stash, 2); - - // * stash 3 has controller 4. - assert_eq!(Bonded::::get(3), Some(4)); - assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(3)), Some(4)); - assert_eq!(Ledger::::get(4).unwrap().stash, 3); + // * +3 ledgers + assert_eq!(Ledger::::iter().count(), 3 + init_ledgers); + + // * stash 333 has controller 444. + assert_eq!(Bonded::::get(333), Some(444)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(333)), Some(444)); + assert_eq!(Ledger::::get(444).unwrap().stash, 333); + + // * stash 444 has controller 555. + assert_eq!(Bonded::::get(444), Some(555)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(444)), Some(555)); + assert_eq!(Ledger::::get(555).unwrap().stash, 444); + + // * stash 555 has controller 777. + assert_eq!(Bonded::::get(555), Some(777)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(555)), Some(777)); + assert_eq!(Ledger::::get(777).unwrap().stash, 555); } #[macro_export] diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index d42456e53b13dbf20959b6aae980de0c9ed3e3e7..2f43e4847e451a7cc10413bb58e3d98d82577e9f 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -27,8 +27,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, Len, - OnUnbalanced, TryCollect, UnixTime, + Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, + InspectLockableCurrency, Len, OnUnbalanced, TryCollect, UnixTime, }, weights::Weight, }; @@ -50,8 +50,8 @@ use sp_std::prelude::*; use crate::{ election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, - MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, - RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, + LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, + PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, }; use super::pallet::*; @@ -84,6 +84,38 @@ impl Pallet { StakingLedger::::paired_account(Stash(stash.clone())) } + /// Inspects and returns the corruption state of a ledger and bond, if any. + /// + /// Note: all operations in this method directly access the `Bonded` and `Ledger` storage maps + /// instead of using the [`StakingLedger`] API since the bond and/or ledger may be corrupted. + pub(crate) fn inspect_bond_state( + stash: &T::AccountId, + ) -> Result> { + let lock = T::Currency::balance_locked(crate::STAKING_ID, &stash); + + let controller = >::get(stash).ok_or_else(|| { + if lock == Zero::zero() { + Error::::NotStash + } else { + Error::::BadState + } + })?; + + match Ledger::::get(controller) { + Some(ledger) => + if ledger.stash != *stash { + Ok(LedgerIntegrityState::Corrupted) + } else { + if lock != ledger.total { + Ok(LedgerIntegrityState::LockCorrupted) + } else { + Ok(LedgerIntegrityState::Ok) + } + }, + None => Ok(LedgerIntegrityState::CorruptedKilled), + } + } + /// The total balance that can be slashed from a stash account as of right now.
pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { // Weight note: consider making the stake accessible through stash. @@ -1837,12 +1869,12 @@ impl Pallet { "VoterList contains non-staker" ); + Self::check_ledgers()?; Self::check_bonded_consistency()?; Self::check_payees()?; Self::check_nominators()?; Self::check_exposures()?; Self::check_paged_exposures()?; - Self::check_ledgers()?; Self::check_count() } @@ -1851,6 +1883,7 @@ impl Pallet { /// * A bonded (stash, controller) pair should have only one associated ledger. I.e. if the /// ledger is bonded by stash, the controller account must not bond a different ledger. /// * A bonded (stash, controller) pair must have an associated ledger. + /// /// NOTE: these checks result in warnings only. Once /// is resolved, turn warns into check /// failures. @@ -1906,7 +1939,7 @@ impl Pallet { /// Invariants: /// * A bonded ledger should always have an assigned `Payee`. /// * The number of entries in `Payee` and of bonded staking ledgers *must* match. - /// * The stash account in the ledger must match that of the bonded acount. + /// * The stash account in the ledger must match that of the bonded account. fn check_payees() -> Result<(), TryRuntimeError> { for (stash, _) in Bonded::::iter() { ensure!(Payee::::get(&stash).is_some(), "bonded ledger does not have payee set"); @@ -1945,19 +1978,18 @@ impl Pallet { } /// Invariants: - /// * `ledger.controller` is not stored in the storage (but populated at retrieval). /// * Stake consistency: ledger.total == ledger.active + sum(ledger.unlocking). - /// * The controller keyeing the ledger and the ledger stash matches the state of the `Bonded` - /// storage. + /// * The ledger's controller and stash match the associated `Bonded` tuple. + /// * Staking locked funds for every bonded stash should be the same as its ledger's total. + /// * Staking ledger and bond are not corrupted. fn check_ledgers() -> Result<(), TryRuntimeError> { Bonded::::iter() .map(|(stash, ctrl)| { - // `ledger.controller` is never stored in raw storage. - let raw = Ledger::::get(stash).unwrap_or_else(|| { - Ledger::::get(ctrl.clone()) - .expect("try_check: bonded stash/ctrl does not have an associated ledger") - }); - ensure!(raw.controller.is_none(), "raw storage controller should be None"); + // ensure lock consistency. + ensure!( + Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), + "bond, ledger and/or staking lock inconsistent for a bonded stash." + ); // ensure ledger consistency.
Self::ensure_ledger_consistent(ctrl) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 1bf8bd8b09cb7e3255a36ccf6afe6c5fbf5f8111..2e5b3aa7b873e2c123b3176a4e80d6e0cd1b6177 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -25,7 +25,7 @@ use frame_support::{ pallet_prelude::*, traits::{ Currency, Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get, - LockableCurrency, OnUnbalanced, UnixTime, + InspectLockableCurrency, LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, }, weights::Weight, BoundedVec, @@ -48,13 +48,13 @@ pub use impls::*; use crate::{ slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, ExposurePage, Forcing, MaxNominationsOf, NegativeImbalanceOf, - Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, - StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, + EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf, + NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, + SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, }; // The speculative number of spans are used as an input of the weight annotation of -// [`Call::unbond`], as the post dipatch weight may depend on the number of slashing span on the +// [`Call::unbond`], as the post dispatch weight may depend on the number of slashing spans on the // account which is not provided as an input. The value set should be conservative but sensible. pub(crate) const SPECULATIVE_NUM_SPANS: u32 = 32; @@ -88,10 +88,10 @@ pub mod pallet { pub trait Config: frame_system::Config { /// The staking balance. type Currency: LockableCurrency< - Self::AccountId, - Moment = BlockNumberFor, - Balance = Self::CurrencyBalance, - >; + Self::AccountId, + Moment = BlockNumberFor, + Balance = Self::CurrencyBalance, + > + InspectLockableCurrency; /// Just the `Currency::Balance` type; we have this item to allow us to constrain it to /// `From`. type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned @@ -796,6 +796,7 @@ pub mod pallet { } #[pallet::error] + #[derive(PartialEq)] pub enum Error { /// Not a controller account. NotController, @@ -855,6 +856,8 @@ pub mod pallet { BoundNotMet, /// Used when attempting to use deprecated controller account logic. ControllerDeprecated, + /// Cannot restore a ledger. + CannotRestoreLedger, } #[pallet::hooks] @@ -1134,7 +1137,7 @@ pub mod pallet { /// this call results in a complete removal of all the data related to the stash account. /// In this case, the `num_slashing_spans` must be larger or equal to the number of /// slashing spans associated with the stash account in the [`SlashingSpans`] storage type, - /// otherwise the call will fail. The call weight is directly propotional to + /// otherwise the call will fail. The call weight is directly proportional to /// `num_slashing_spans`. /// /// ## Complexity @@ -1376,7 +1379,7 @@ pub mod pallet { Ok(()) } - /// Increments the ideal number of validators upto maximum of + /// Increments the ideal number of validators up to a maximum of /// `ElectionProviderBase::MaxWinners`. /// /// The dispatch origin must be Root.
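Aside for reviewers: the decision table encoded by the new `inspect_bond_state` helper (added in `impls.rs` above) is easy to lose in the diff noise, so here is a minimal, self-contained sketch of the same classification. It is illustrative only and not part of the patch: plain `HashMap`s and integers stand in for the `Bonded` and `Ledger` storage items and for the staking lock, and every name below is hypothetical.

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum LedgerIntegrityState { Ok, Corrupted, CorruptedKilled, LockCorrupted }

#[derive(Debug, PartialEq)]
enum InspectError { NotStash, BadState }

struct Ledger { stash: u64, total: u128 }

// Stand-in for `inspect_bond_state`: classify a stash from the `Bonded` map
// (stash -> controller), the `Ledger` map (controller -> ledger), and the
// staking lock currently placed on the stash account.
fn inspect_bond_state(
    stash: u64,
    bonded: &HashMap<u64, u64>,
    ledgers: &HashMap<u64, Ledger>,
    lock: u128,
) -> Result<LedgerIntegrityState, InspectError> {
    let controller = match bonded.get(&stash) {
        Some(c) => *c,
        // not bonded: a lingering lock is a bad state, otherwise not a stash at all.
        None if lock == 0 => return Err(InspectError::NotStash),
        None => return Err(InspectError::BadState),
    };
    match ledgers.get(&controller) {
        // the bonded controller keys a ledger of a *different* stash.
        Some(l) if l.stash != stash => Ok(LedgerIntegrityState::Corrupted),
        // ledger is consistent, but the lock drifted away from `ledger.total`.
        Some(l) if l.total != lock => Ok(LedgerIntegrityState::LockCorrupted),
        Some(_) => Ok(LedgerIntegrityState::Ok),
        // bonded, but the ledger is gone entirely.
        None => Ok(LedgerIntegrityState::CorruptedKilled),
    }
}

fn main() {
    // healthy: stash 333 bonded to controller 444; ledger and lock agree.
    let bonded = HashMap::from([(333, 444)]);
    let ledgers = HashMap::from([(444, Ledger { stash: 333, total: 100 })]);
    assert_eq!(inspect_bond_state(333, &bonded, &ledgers, 100), Ok(LedgerIntegrityState::Ok));
    // same data, but the lock drifted: `LockCorrupted`.
    assert_eq!(inspect_bond_state(333, &bonded, &ledgers, 90), Ok(LedgerIntegrityState::LockCorrupted));
}

This mirrors the order of checks in the actual helper: bond first (distinguishing a missing bond from a lingering lock), then ledger-vs-stash identity, then lock-vs-total consistency.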
@@ -1401,7 +1404,7 @@ { Ok(()) } - /// Scale up the ideal number of validators by a factor upto maximum of + /// Scale up the ideal number of validators by a factor up to a maximum of /// `ElectionProviderBase::MaxWinners`. /// /// The dispatch origin must be Root. @@ -1980,6 +1983,108 @@ pub mod pallet { Ok(Some(T::WeightInfo::deprecate_controller_batch(controllers.len() as u32)).into()) } + + /// Restores a ledger that is in an inconsistent state. + /// + /// The requirements to restore a ledger are the following: + /// * The stash is bonded; or + /// * The stash is not bonded but it has a staking lock left behind; or + /// * The stash has an associated ledger and its state is inconsistent; or + /// * The ledger is not corrupted *but* its staking lock is out of sync. + /// + /// The `maybe_*` input parameters will overwrite the corresponding data and metadata of the + /// ledger associated with the stash. If the input parameters are not set, the ledger will + /// be reset with values from on-chain state. + #[pallet::call_index(29)] + #[pallet::weight(T::WeightInfo::restore_ledger())] + pub fn restore_ledger( + origin: OriginFor, + stash: T::AccountId, + maybe_controller: Option, + maybe_total: Option>, + maybe_unlocking: Option>, T::MaxUnlockingChunks>>, + ) -> DispatchResult { + T::AdminOrigin::ensure_origin(origin)?; + + let current_lock = T::Currency::balance_locked(crate::STAKING_ID, &stash); + let stash_balance = T::Currency::free_balance(&stash); + + let (new_controller, new_total) = match Self::inspect_bond_state(&stash) { + Ok(LedgerIntegrityState::Corrupted) => { + let new_controller = maybe_controller.unwrap_or(stash.clone()); + + let new_total = if let Some(total) = maybe_total { + let new_total = total.min(stash_balance); + // enforce lock == ledger.amount. + T::Currency::set_lock( + crate::STAKING_ID, + &stash, + new_total, + WithdrawReasons::all(), + ); + new_total + } else { + current_lock + }; + + Ok((new_controller, new_total)) + }, + Ok(LedgerIntegrityState::CorruptedKilled) => { + if current_lock == Zero::zero() { + // this case needs to restore both lock and ledger, so the new total needs + // to be given by the caller since there's no way to restore the total + // on-chain. + ensure!(maybe_total.is_some(), Error::::CannotRestoreLedger); + Ok(( + stash.clone(), + maybe_total.expect("total exists as per the check above; qed."), + )) + } else { + Ok((stash.clone(), current_lock)) + } + }, + Ok(LedgerIntegrityState::LockCorrupted) => { + // ledger is not corrupted but its locks are out of sync. In this case, we need + // to enforce a new ledger.total and staking lock for this stash. + let new_total = + maybe_total.ok_or(Error::::CannotRestoreLedger)?.min(stash_balance); + T::Currency::set_lock( + crate::STAKING_ID, + &stash, + new_total, + WithdrawReasons::all(), + ); + + Ok((stash.clone(), new_total)) + }, + Err(Error::::BadState) => { + // the stash and ledger do not exist but lock is lingering. + T::Currency::remove_lock(crate::STAKING_ID, &stash); + ensure!( + Self::inspect_bond_state(&stash) == Err(Error::::NotStash), + Error::::BadState + ); + + return Ok(()); + }, + Ok(LedgerIntegrityState::Ok) | Err(_) => Err(Error::::CannotRestoreLedger), + }?; + + // re-bond stash and controller tuple. + Bonded::::insert(&stash, &new_controller); + + // restore ledger state.
+ let mut ledger = StakingLedger::::new(stash.clone(), new_total); + ledger.controller = Some(new_controller); + ledger.unlocking = maybe_unlocking.unwrap_or_default(); + ledger.update()?; + + ensure!( + Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), + Error::::BadState + ); + Ok(()) + } } } diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 3725c9e3c2c5bc9e11fbe01802a55c0c1a22dd58..a5c9abe2f176233f1f66e9872fde0a6bde99bbcb 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -270,7 +270,7 @@ fn change_controller_works() { assert_eq!(ledger.controller(), Some(stash)); // the raw storage ledger's controller is always `None`. however, we can still fetch the - // correct controller with `ledger.controler()`. + // correct controller with `ledger.controller()`. let raw_ledger = >::get(&stash).unwrap(); assert_eq!(raw_ledger.controller, None); @@ -1257,7 +1257,7 @@ fn bond_extra_controller_bad_state_works() { Bonded::::insert(31, 41); // we confirm that the ledger is in bad state: 31 has 41 as controller and when fetching - // the ledger associated with the controler 41, its stash is 41 (and not 31). + // the ledger associated with the controller 41, its stash is 41 (and not 31). assert_eq!(Ledger::::get(41).unwrap().stash, 41); // if the ledger is in this bad state, the `bond_extra` should fail. @@ -1815,7 +1815,7 @@ fn max_staked_rewards_works() { let total_payout = treasury_payout + validators_payout; // max stakers payout (without max staked rewards cap applied) is larger than the final - // validator rewards. The final payment and remainder should be adjusted by redestributing + // validator rewards. The final payment and remainder should be adjusted by redistributing // the era inflation to apply the cap... assert!(max_stakers_payout > validators_payout); @@ -4686,7 +4686,7 @@ fn bond_during_era_does_not_populate_legacy_claimed_rewards() { } ); - // make sure only era upto history depth is stored + // make sure only era up to history depth is stored let current_era = 99; mock::start_active_era(current_era); bond_validator(13, 1000); @@ -5412,7 +5412,7 @@ mod election_data_provider { Event::SnapshotVotersSizeExceeded { size: 75 } ); - // however, if the election voter size bounds were largers, the snapshot would + // however, if the election voter size bounds were larger, the snapshot would // include the electing voters of 70. 
let bounds = ElectionBoundsBuilder::default().voters_size(1_000.into()).build(); assert_eq!( @@ -6933,40 +6933,43 @@ mod ledger { setup_double_bonded_ledgers(); // Case 1: double bonded but not corrupted: - // stash 2 has controller 3: - assert_eq!(Bonded::::get(2), Some(3)); - assert_eq!(Ledger::::get(3).unwrap().stash, 2); + // stash 444 has controller 555: + assert_eq!(Bonded::::get(444), Some(555)); + assert_eq!(Ledger::::get(555).unwrap().stash, 444); - // stash 2 is also a controller of 1: - assert_eq!(Bonded::::get(1), Some(2)); - assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(1)), Some(2)); - assert_eq!(Ledger::::get(2).unwrap().stash, 1); + // stash 444 is also a controller of 333: + assert_eq!(Bonded::::get(333), Some(444)); + assert_eq!( + StakingLedger::::paired_account(StakingAccount::Stash(333)), + Some(444) + ); + assert_eq!(Ledger::::get(444).unwrap().stash, 333); - // although 2 is double bonded (it is a controller and a stash of different ledgers), + // although 444 is double bonded (it is a controller and a stash of different ledgers), // we can safely retrieve the ledger and mutate it since the correct ledger is // returned. - let ledger_result = StakingLedger::::get(StakingAccount::Stash(2)); - assert_eq!(ledger_result.unwrap().stash, 2); // correct ledger. + let ledger_result = StakingLedger::::get(StakingAccount::Stash(444)); + assert_eq!(ledger_result.unwrap().stash, 444); // correct ledger. - let ledger_result = StakingLedger::::get(StakingAccount::Controller(2)); - assert_eq!(ledger_result.unwrap().stash, 1); // correct ledger. + let ledger_result = StakingLedger::::get(StakingAccount::Controller(444)); + assert_eq!(ledger_result.unwrap().stash, 333); // correct ledger. - // fetching ledger 1 by its stash works. - let ledger_result = StakingLedger::::get(StakingAccount::Stash(1)); - assert_eq!(ledger_result.unwrap().stash, 1); + // fetching ledger 333 by its stash works. + let ledger_result = StakingLedger::::get(StakingAccount::Stash(333)); + assert_eq!(ledger_result.unwrap().stash, 333); // Case 2: corrupted ledger bonding. // in this case, we simulate what happens when fetching a ledger by stash returns a // ledger with a different stash. when this happens, we return an error instead of the // ledger to prevent ledger mutations. - let mut ledger = Ledger::::get(2).unwrap(); - assert_eq!(ledger.stash, 1); - ledger.stash = 2; - Ledger::::insert(2, ledger); + let mut ledger = Ledger::::get(444).unwrap(); + assert_eq!(ledger.stash, 333); + ledger.stash = 444; + Ledger::::insert(444, ledger); // now, we are prevented from fetching the ledger by stash from 1. Its associated // controller (2) is now bonding a ledger with a different stash (2, not 1).
- assert!(StakingLedger::::get(StakingAccount::Stash(1)).is_err()); + assert!(StakingLedger::::get(StakingAccount::Stash(333)).is_err()); }) } @@ -7069,7 +7072,7 @@ mod ledger { #[test] fn deprecate_controller_batch_works_full_weight() { - ExtBuilder::default().build_and_execute(|| { + ExtBuilder::default().try_state(false).build_and_execute(|| { // Given: let start = 1001; @@ -7253,7 +7256,7 @@ mod ledger { let bounded_controllers: BoundedVec< _, ::MaxControllersInDeprecationBatch, - > = BoundedVec::try_from(vec![1, 2, 3, 4]).unwrap(); + > = BoundedVec::try_from(vec![333, 444, 555, 777]).unwrap(); assert_ok!(Staking::deprecate_controller_batch( RuntimeOrigin::root(), @@ -7276,7 +7279,7 @@ mod ledger { let bounded_controllers: BoundedVec< _, ::MaxControllersInDeprecationBatch, - > = BoundedVec::try_from(vec![4, 3, 2, 1]).unwrap(); + > = BoundedVec::try_from(vec![777, 555, 444, 333]).unwrap(); assert_ok!(Staking::deprecate_controller_batch( RuntimeOrigin::root(), @@ -7296,9 +7299,9 @@ mod ledger { setup_double_bonded_ledgers(); // in this case, setting controller works due to the ordering of the calls. - assert_ok!(Staking::set_controller(RuntimeOrigin::signed(1))); - assert_ok!(Staking::set_controller(RuntimeOrigin::signed(2))); - assert_ok!(Staking::set_controller(RuntimeOrigin::signed(3))); + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(333))); + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(444))); + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(555))); }) } @@ -7307,17 +7310,400 @@ mod ledger { ExtBuilder::default().has_stakers(false).try_state(false).build_and_execute(|| { setup_double_bonded_ledgers(); - // setting the controller of ledger associated with stash 3 fails since its stash is a + // setting the controller of the ledger associated with stash 555 fails since its stash is a // controller of another ledger. assert_noop!( - Staking::set_controller(RuntimeOrigin::signed(3)), + Staking::set_controller(RuntimeOrigin::signed(555)), Error::::BadState ); assert_noop!( - Staking::set_controller(RuntimeOrigin::signed(2)), + Staking::set_controller(RuntimeOrigin::signed(444)), Error::::BadState ); - assert_ok!(Staking::set_controller(RuntimeOrigin::signed(1))); + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(333))); + }) + } +} + +mod ledger_recovery { + use super::*; + use frame_support::traits::InspectLockableCurrency; + + #[test] + fn inspect_recovery_ledger_simple_works() { + ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // non-corrupted ledger. + assert_eq!(Staking::inspect_bond_state(&11).unwrap(), LedgerIntegrityState::Ok); + + // non-bonded stash. + assert!(Bonded::::get(&1111).is_none()); + assert!(Staking::inspect_bond_state(&1111).is_err()); + + // double bonded but not corrupted. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + }) + } + + #[test] + fn inspect_recovery_ledger_corrupted_killed_works() { + ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // kill(333) + // (444, 444) -> corrupted and None.
+ assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // 333 is corrupted since its controller is linking the 444 ledger. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // 444 however is OK. + assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok); + + // kill the corrupted ledger that is associated with stash 333. + assert_ok!(StakingLedger::::kill(&333)); + + // 333 bond is no more but it returns `BadState` because the lock on this stash is + // still set (see checks below). + assert_eq!(Staking::inspect_bond_state(&333), Err(Error::::BadState)); + // now the *other* ledger associated with 444 has been corrupted and killed (None). + assert_eq!( + Staking::inspect_bond_state(&444), + Ok(LedgerIntegrityState::CorruptedKilled) + ); + + // side effects on 333 - ledger, bonded, payee, lock should be completely empty. + // however, 333 lock remains. + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // NOK + assert!(Bonded::::get(&333).is_none()); // OK + assert!(Payee::::get(&333).is_none()); // OK + assert!(Ledger::::get(&444).is_none()); // OK + + // side effects on 444 - ledger, bonded, payee, lock should remain intact. + // however, 444 lock was removed. + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), 0); // NOK + assert!(Bonded::::get(&444).is_some()); // OK + assert!(Payee::::get(&444).is_some()); // OK + assert!(Ledger::::get(&555).is_none()); // NOK + + assert!(Staking::do_try_state(System::block_number()).is_err()); + }) + } + + #[test] + fn inspect_recovery_ledger_corrupted_killed_other_works() { + ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // kill(444) + // (333, 444) -> corrupted and None + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // 333 is corrupted since its controller is linking the 444 ledger. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // 444 however is OK. + assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok); + + // kill the *other* ledger that is double bonded but not corrupted. + assert_ok!(StakingLedger::::kill(&444)); + + // now 333 is corrupted and None through the *other* ledger being killed. + assert_eq!( + Staking::inspect_bond_state(&333).unwrap(), + LedgerIntegrityState::CorruptedKilled, + ); + // 444 is cleaned and not a stash anymore; no lock left behind. + assert_eq!(Ledger::::get(&444), None); + assert_eq!(Staking::inspect_bond_state(&444), Err(Error::::NotStash)); + + // side effects on 333 - ledger, bonded, payee, lock should be intact. + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // OK + assert_eq!(Bonded::::get(&333), Some(444)); // OK + assert!(Payee::::get(&333).is_some()); // OK + // however, ledger associated with its controller was killed.
+ assert!(Ledger::::get(&444).is_none()); // NOK + + // side effects on 444 - ledger, bonded, payee, lock should be completely removed. + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), 0); // OK + assert!(Bonded::::get(&444).is_none()); // OK + assert!(Payee::::get(&444).is_none()); // OK + assert!(Ledger::::get(&555).is_none()); // OK + + assert!(Staking::do_try_state(System::block_number()).is_err()); + }) + } + + #[test] + fn inspect_recovery_ledger_lock_corrupted_works() { + ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // get into lock corrupted ledger state by calling bond_extra on a ledger that is double bonded + // with a corrupted ledger. + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // bond_extra(333, 10) -> lock corrupted on 444 + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + bond_extra_no_checks(&333, 10); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // 333 is corrupted since its controller is linking the 444 ledger. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // 444 ledger is not corrupted but locks got out of sync. + assert_eq!( + Staking::inspect_bond_state(&444).unwrap(), + LedgerIntegrityState::LockCorrupted + ); + }) + } + + // Corrupted ledger restore. + // + // * Double bonded and corrupted ledger. + #[test] + fn restore_ledger_corrupted_works() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // get into a corrupted ledger state. + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // recover the ledger bonded by 333 stash. + assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None)); + + // try-state checks are ok now. + assert_ok!(Staking::do_try_state(System::block_number())); + }) + } + + // Corrupted and killed ledger restore. + // + // * Double bonded and corrupted ledger. + // * Ledger killed by own controller. + #[test] + fn restore_ledger_corrupted_killed_works() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // ledger.total == lock + let total_444_before_corruption = Balances::balance_locked(crate::STAKING_ID, &444); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // kill(333) + // (444, 444) -> corrupted and None. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // kill the corrupted ledger that is associated with stash 333. + assert_ok!(StakingLedger::::kill(&333)); + + // 333 bond is no more but it returns `BadState` because the lock on this stash is + // still set (see checks below).
+ assert_eq!(Staking::inspect_bond_state(&333), Err(Error::::BadState)); + // now the *other* ledger associated with 444 has been corrupted and killed (None). + assert!(Staking::ledger(StakingAccount::Stash(444)).is_err()); + + // try-state should fail. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // recover the ledger bonded by 333 stash. + assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None)); + + // for the try-state checks to pass, we also need to recover the stash 444 which is + // corrupted too by proxy of kill(333). Currently, both the lock and the ledger of 444 + // have been cleared so we need to provide the new amount to restore the ledger. + assert_noop!( + Staking::restore_ledger(RuntimeOrigin::root(), 444, None, None, None), + Error::::CannotRestoreLedger + ); + + assert_ok!(Staking::restore_ledger( + RuntimeOrigin::root(), + 444, + None, + Some(total_444_before_corruption), + None, + )); + + // try-state checks are ok now. + assert_ok!(Staking::do_try_state(System::block_number())); + }) + } + + // Corrupted and killed by *other* ledger restore. + // + // * Double bonded and corrupted ledger. + // * Ledger killed by own controller. + #[test] + fn restore_ledger_corrupted_killed_other_works() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // kill(444) + // (333, 444) -> corrupted and None + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // 333 is corrupted since its controller is linking the 444 ledger. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // 444 however is OK. + assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok); + + // kill the *other* ledger that is double bonded but not corrupted. + assert_ok!(StakingLedger::::kill(&444)); + + // recover the ledger bonded by 333 stash. + assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None)); + + // 444 does not need recovery in this case since it's been killed successfully. + assert_eq!(Staking::inspect_bond_state(&444), Err(Error::::NotStash)); + + // try-state checks are ok now. + assert_ok!(Staking::do_try_state(System::block_number())); + }) + } + + // Corrupted with bond_extra. + // + // * Double bonded and corrupted ledger. + // * Corrupted ledger calls `bond_extra` + #[test] + fn restore_ledger_corrupted_bond_extra_works() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + + let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333); + let lock_444_before = Balances::balance_locked(crate::STAKING_ID, &444); + + // get into a corrupted ledger state with out-of-sync locks via bond_extra on a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // bond_extra(444, 40) -> OK + // bond_extra(333, 30) -> locks out of sync + + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // now try-state fails.
+ assert!(Staking::do_try_state(System::block_number()).is_err()); + + // if 444 bonds extra, the locks remain in sync. + bond_extra_no_checks(&444, 40); + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), lock_444_before + 40); + + // however if 333 bonds extra, the wrong lock is updated. + bond_extra_no_checks(&333, 30); + assert_eq!( + Balances::balance_locked(crate::STAKING_ID, &333), + lock_444_before + 40 + 30 + ); // not OK + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), lock_444_before + 40); // OK + + // recover the ledger bonded by 333 stash. Note that the total/lock needs to be + // re-written since the on-chain lock has gone out of sync. + assert_ok!(Staking::restore_ledger( + RuntimeOrigin::root(), + 333, + None, + Some(lock_333_before + 30), + None + )); + + // now recover 444: although it is not corrupted, its lock and ledger.total are out + // of sync. In this case, we need to explicitly set the ledger's lock and amount, + // otherwise the ledger recovery will fail. + assert_noop!( + Staking::restore_ledger(RuntimeOrigin::root(), 444, None, None, None), + Error::::CannotRestoreLedger + ); + + // and enforcing a new ledger lock/total on this non-corrupted ledger will work. + assert_ok!(Staking::restore_ledger( + RuntimeOrigin::root(), + 444, + None, + Some(lock_444_before + 40), + None + )); + + // double-check that the ledgers reached the expected state and that the bond_extra done + // during the corrupted state is part of the recovered ledgers. + let ledger_333 = Bonded::::get(&333).and_then(Ledger::::get).unwrap(); + let ledger_444 = Bonded::::get(&444).and_then(Ledger::::get).unwrap(); + + assert_eq!(ledger_333.total, lock_333_before + 30); + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), ledger_333.total); + assert_eq!(ledger_444.total, lock_444_before + 40); + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), ledger_444.total); + + // try-state checks are ok now. + assert_ok!(Staking::do_try_state(System::block_number())); }) } } diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index 6f729e08ba5c839997cd81239f0e4bd39728df14..8a04a3dfb3f70aad5592944f0b847dc080cece89 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_staking` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-q7z7ruxr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -80,6 +80,7 @@ pub trait WeightInfo { fn chill_other() -> Weight; fn force_apply_min_commission() -> Weight; fn set_min_commission() -> Weight; + fn restore_ledger() -> Weight; } /// Weights for `pallet_staking` using the Substrate node and recommended hardware.
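To keep the recovery paths straight while reading the new extrinsic and its tests above, the condensed sketch below models how `restore_ledger` resolves the rebuilt ledger's (controller, total) pair for each integrity state. This is a sketch under simplifying assumptions, not the pallet code: plain integers replace the pallet types, a string replaces `Error<T>`, and the lingering-lock branch (which just removes the stale lock and returns early in the extrinsic) is only noted in a comment.

enum State { Corrupted, CorruptedKilled, LockCorrupted, Ok }

// Condensed model of how `restore_ledger` picks the rebuilt ledger's
// (controller, total). `maybe_controller`/`maybe_total` mirror the extrinsic's
// optional overrides; `stash_balance` caps any caller-provided total.
fn resolve_restore(
    state: State,
    stash: u64,
    stash_balance: u128,
    current_lock: u128,
    maybe_controller: Option<u64>,
    maybe_total: Option<u128>,
) -> Result<(u64, u128), &'static str> {
    match state {
        // controller keys someone else's ledger: rebind to the given (or default)
        // controller; a caller-provided total is capped by the free balance and
        // re-applied as the staking lock, otherwise the current lock is kept.
        State::Corrupted => {
            let controller = maybe_controller.unwrap_or(stash);
            let total = maybe_total.map_or(current_lock, |t| t.min(stash_balance));
            Ok((controller, total))
        },
        // ledger is gone and so is the lock: only the caller can know the total.
        State::CorruptedKilled if current_lock == 0 =>
            maybe_total.map(|t| (stash, t)).ok_or("CannotRestoreLedger"),
        // ledger is gone but the lock survived: reuse it as the total.
        State::CorruptedKilled => Ok((stash, current_lock)),
        // ledger is fine but the lock drifted: a new total must be provided and
        // is enforced as the new lock.
        State::LockCorrupted =>
            maybe_total.map(|t| (stash, t.min(stash_balance))).ok_or("CannotRestoreLedger"),
        // healthy ledgers are not restorable. (The extrinsic separately handles a
        // lingering lock with no bond at all by simply removing the lock.)
        State::Ok => Err("CannotRestoreLedger"),
    }
}

fn main() {
    // corrupted-and-killed with no lock left: the caller must supply the total.
    assert_eq!(resolve_restore(State::CorruptedKilled, 333, 1_000, 0, None, None), Err("CannotRestoreLedger"));
    assert_eq!(resolve_restore(State::CorruptedKilled, 333, 1_000, 0, None, Some(100)), Ok((333, 100)));
    // lock drift: the provided total is capped by the stash's free balance.
    assert_eq!(resolve_restore(State::LockCorrupted, 333, 50, 10, None, Some(80)), Ok((333, 50)));
    // corrupted but lock intact: defaults to (stash, current lock).
    assert_eq!(resolve_restore(State::Corrupted, 333, 1_000, 70, None, None), Ok((333, 70)));
    // healthy ledgers cannot be "restored".
    assert_eq!(resolve_restore(State::Ok, 333, 1_000, 70, None, None), Err("CannotRestoreLedger"));
}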
@@ -87,21 +88,21 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `927` + // Measured: `1042` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Minimum execution time: 48_753_000 picoseconds. + Weight::from_parts(50_539_000, 4764) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -120,21 +121,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 92_701_000 picoseconds. + Weight::from_parts(95_657_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -147,41 +148,43 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 101_049_000 picoseconds. 
+ Weight::from_parts(103_729_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 51_672_000 picoseconds. + Weight::from_parts(53_817_441, 4764) + // Standard Error: 1_124 + .saturating_add(Weight::from_parts(49_168, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -207,10 +210,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 92_846_000 picoseconds. 
+ Weight::from_parts(102_158_606, 6248) + // Standard Error: 4_187 + .saturating_add(Weight::from_parts(1_436_364, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -218,6 +221,8 @@ impl WeightInfo for SubstrateWeight { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -228,8 +233,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -242,31 +245,35 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 58_162_000 picoseconds. + Weight::from_parts(60_124_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1280 + k * (569 ±0)` + // Measured: `1815 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 37_950_000 picoseconds. 
+ Weight::from_parts(34_461_075, 4556) + // Standard Error: 8_013 + .saturating_add(Weight::from_parts(6_696_510, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -277,8 +284,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -292,10 +297,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 70_167_000 picoseconds. + Weight::from_parts(68_024_084, 6248) + // Standard Error: 14_256 + .saturating_add(Weight::from_parts(4_195_757, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -303,6 +308,8 @@ impl WeightInfo for SubstrateWeight { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -317,11 +324,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1650` + // Measured: `1816` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 61_730_000 picoseconds. 
+ Weight::from_parts(63_430_000, 6248) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -334,37 +341,37 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 20_857_000 picoseconds. + Weight::from_parts(21_615_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 24_739_000 picoseconds. + Weight::from_parts(25_785_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `902` - // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. - Weight::from_parts(20_000_000, 4556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Estimated: `8122` + // Minimum execution time: 24_622_000 picoseconds. + Weight::from_parts(25_220_000, 8122) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -373,8 +380,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_842_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -383,8 +390,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 8_496_000 picoseconds. + Weight::from_parts(9_016_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -393,8 +400,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 8_510_000 picoseconds. 
+ Weight::from_parts(8_893_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -403,8 +410,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 8_243_000 picoseconds. + Weight::from_parts(8_678_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -414,30 +421,30 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_781_000 picoseconds. + Weight::from_parts(3_441_708, 0) + // Standard Error: 58 + .saturating_add(Weight::from_parts(11_811, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Staking::Ledger` (r:5900 w:11800) + /// Storage: `Staking::Ledger` (r:11800 w:11800) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:5900 w:5900) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:5900 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:5900) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5900]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1356 + i * (151 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 2_092_000 picoseconds. - Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Measured: `1746 + i * (229 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 5_331_000 picoseconds. + Weight::from_parts(5_511_000, 990) + // Standard Error: 66_734 + .saturating_add(Weight::from_parts(31_157_413, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -472,10 +479,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 89_473_000 picoseconds. 
+ Weight::from_parts(98_055_990, 6248) + // Standard Error: 4_159 + .saturating_add(Weight::from_parts(1_398_203, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -488,10 +495,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 102_480_000 picoseconds. + Weight::from_parts(1_165_789_820, 70137) + // Standard Error: 77_157 + .saturating_add(Weight::from_parts(6_489_253, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -528,10 +535,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 156_890_000 picoseconds. + Weight::from_parts(202_972_688, 30944) + // Standard Error: 29_972 + .saturating_add(Weight::from_parts(48_226_698, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -555,10 +562,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 88_482_000 picoseconds. + Weight::from_parts(92_616_600, 8877) + // Standard Error: 4_411 + .saturating_add(Weight::from_parts(117_722, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -593,10 +600,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 98_489_000 picoseconds. + Weight::from_parts(102_968_643, 6248) + // Standard Error: 4_823 + .saturating_add(Weight::from_parts(1_420_838, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -642,12 +649,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 564_963_000 picoseconds. 
- Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 604_820_000 picoseconds. + Weight::from_parts(608_838_000, 512390) + // Standard Error: 2_300_345 + .saturating_add(Weight::from_parts(72_980_573, 0).saturating_mul(v.into())) + // Standard Error: 229_216 + .saturating_add(Weight::from_parts(20_739_416, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -678,12 +685,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 37_380_439_000 picoseconds. + Weight::from_parts(38_187_734_000, 512390) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(6_001_288, 0).saturating_mul(v.into())) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(4_129_446, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -700,10 +707,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_572_838_000 picoseconds. + Weight::from_parts(67_632_557, 3510) + // Standard Error: 12_028 + .saturating_add(Weight::from_parts(5_117_459, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -714,6 +721,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -724,9 +733,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. 
- Weight::from_parts(5_742_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_962_000 picoseconds. + Weight::from_parts(6_497_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -734,6 +743,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -744,9 +755,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_227_000 picoseconds. + Weight::from_parts(5_496_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -774,8 +785,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 75_129_000 picoseconds. + Weight::from_parts(77_498_000, 6248) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -787,8 +798,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. - Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 13_488_000 picoseconds. + Weight::from_parts(14_183_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -798,31 +809,50 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_368_000 picoseconds. 
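(The step from `writes(6_u64)` to `writes(7_u64)` in both `set_staking_configs` variants above is the newly written `MaxStakedRewards` item. A small cross-check follows; the six pre-existing names are reconstructed from the storage annotations and should be treated as an assumption.)

fn staking_config_write_count() {
    let written: [&str; 7] = [
        "MinNominatorBond", "MinValidatorBond", "MaxNominatorsCount",
        "MaxValidatorsCount", "ChillThreshold", "MinCommission",
        "MaxStakedRewards", // the new seventh write
    ];
    assert_eq!(written.len(), 7); // matches `writes(7_u64)` above
}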
+ Weight::from_parts(3_582_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `1047` + // Estimated: `4764` + // Minimum execution time: 44_876_000 picoseconds. + Weight::from_parts(46_353_000, 4764) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } } // For backwards compatibility and tests. impl WeightInfo for () { /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:0 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn bond() -> Weight { // Proof Size summary in bytes: - // Measured: `927` + // Measured: `1042` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Minimum execution time: 48_753_000 picoseconds. + Weight::from_parts(50_539_000, 4764) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) @@ -841,21 +871,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 92_701_000 picoseconds. 
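(The new `restore_ledger` entry above is consistent with its own storage annotations: five items are touched, and all but `Balances::Freezes` are written back.)

fn restore_ledger_cross_check() {
    // (storage item, is_written), copied from the annotations above.
    let accesses = [
        ("Balances::Locks", true),
        ("System::Account", true),
        ("Staking::Bonded", true),
        ("Staking::Ledger", true),
        ("Balances::Freezes", false), // read-only
    ];
    assert_eq!(accesses.len(), 5);                              // reads(5_u64)
    assert_eq!(accesses.iter().filter(|(_, w)| *w).count(), 4); // writes(4_u64)
}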
+ Weight::from_parts(95_657_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -868,41 +898,43 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 101_049_000 picoseconds. + Weight::from_parts(103_729_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 51_672_000 picoseconds. 
+ Weight::from_parts(53_817_441, 4764) + // Standard Error: 1_124 + .saturating_add(Weight::from_parts(49_168, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -928,10 +960,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 92_846_000 picoseconds. + Weight::from_parts(102_158_606, 6248) + // Standard Error: 4_187 + .saturating_add(Weight::from_parts(1_436_364, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -939,6 +971,8 @@ impl WeightInfo for () { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:1 w:0) /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -949,8 +983,6 @@ impl WeightInfo for () { /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:1 w:1) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -963,31 +995,35 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 58_162_000 picoseconds. 
+ Weight::from_parts(60_124_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:128 w:128) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1280 + k * (569 ±0)` + // Measured: `1815 + k * (572 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 37_950_000 picoseconds. + Weight::from_parts(34_461_075, 4556) + // Standard Error: 8_013 + .saturating_add(Weight::from_parts(6_696_510, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -998,8 +1034,6 @@ impl WeightInfo for () { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:1 w:1) @@ -1013,10 +1047,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 70_167_000 picoseconds. 
+ Weight::from_parts(68_024_084, 6248) + // Standard Error: 14_256 + .saturating_add(Weight::from_parts(4_195_757, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1024,6 +1058,8 @@ impl WeightInfo for () { } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -1038,11 +1074,11 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1650` + // Measured: `1816` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 61_730_000 picoseconds. + Weight::from_parts(63_430_000, 6248) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -1055,37 +1091,37 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 20_857_000 picoseconds. + Weight::from_parts(21_615_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:1 w:1) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_payee() -> Weight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 24_739_000 picoseconds. + Weight::from_parts(25_785_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:2) + /// Storage: `Staking::Ledger` (r:2 w:2) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_controller() -> Weight { // Proof Size summary in bytes: // Measured: `902` - // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. 
- Weight::from_parts(20_000_000, 4556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Estimated: `8122` + // Minimum execution time: 24_622_000 picoseconds. + Weight::from_parts(25_220_000, 8122) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Staking::ValidatorCount` (r:0 w:1) @@ -1094,8 +1130,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_842_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1104,8 +1140,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 8_496_000 picoseconds. + Weight::from_parts(9_016_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1114,8 +1150,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 8_510_000 picoseconds. + Weight::from_parts(8_893_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1124,8 +1160,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 8_243_000 picoseconds. + Weight::from_parts(8_678_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -1135,30 +1171,30 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_781_000 picoseconds. + Weight::from_parts(3_441_708, 0) + // Standard Error: 58 + .saturating_add(Weight::from_parts(11_811, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Staking::Ledger` (r:5900 w:11800) + /// Storage: `Staking::Ledger` (r:11800 w:11800) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:5900 w:5900) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:5900 w:0) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:0 w:5900) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5900]`. fn deprecate_controller_batch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1356 + i * (151 ±0)` - // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 2_092_000 picoseconds. 
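(The proof-size growth in `deprecate_controller_batch` and `set_controller` follows from `Staking::Ledger` now being read twice per controller. The arithmetic uses only constants already present in the annotations: 3_566 is Ledger's added PoV size, and 990 is the fixed base appearing in the estimates.)

fn ledger_pov_arithmetic() {
    const LEDGER_POV: u64 = 3_566;
    const BASE: u64 = 990;
    assert_eq!(BASE + LEDGER_POV, 4_556);     // old estimate: one Ledger read
    assert_eq!(BASE + 2 * LEDGER_POV, 8_122); // new `set_controller` estimate
    assert_eq!(2 * LEDGER_POV, 7_132);        // new per-item batch bound
}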
- Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(i.into()))) + // Measured: `1746 + i * (229 ±0)` + // Estimated: `990 + i * (7132 ±0)` + // Minimum execution time: 5_331_000 picoseconds. + Weight::from_parts(5_511_000, 990) + // Standard Error: 66_734 + .saturating_add(Weight::from_parts(31_157_413, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into())) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1193,10 +1229,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 89_473_000 picoseconds. + Weight::from_parts(98_055_990, 6248) + // Standard Error: 4_159 + .saturating_add(Weight::from_parts(1_398_203, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1209,10 +1245,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 102_480_000 picoseconds. + Weight::from_parts(1_165_789_820, 70137) + // Standard Error: 77_157 + .saturating_add(Weight::from_parts(6_489_253, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1249,10 +1285,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 156_890_000 picoseconds. + Weight::from_parts(202_972_688, 30944) + // Standard Error: 29_972 + .saturating_add(Weight::from_parts(48_226_698, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1276,10 +1312,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 88_482_000 picoseconds. 
+ Weight::from_parts(92_616_600, 8877) + // Standard Error: 4_411 + .saturating_add(Weight::from_parts(117_722, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1314,10 +1350,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 98_489_000 picoseconds. + Weight::from_parts(102_968_643, 6248) + // Standard Error: 4_823 + .saturating_add(Weight::from_parts(1_420_838, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1363,12 +1399,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 564_963_000 picoseconds. - Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 604_820_000 picoseconds. + Weight::from_parts(608_838_000, 512390) + // Standard Error: 2_300_345 + .saturating_add(Weight::from_parts(72_980_573, 0).saturating_mul(v.into())) + // Standard Error: 229_216 + .saturating_add(Weight::from_parts(20_739_416, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1399,12 +1435,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 37_380_439_000 picoseconds. + Weight::from_parts(38_187_734_000, 512390) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(6_001_288, 0).saturating_mul(v.into())) + // Standard Error: 425_319 + .saturating_add(Weight::from_parts(4_129_446, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1421,10 +1457,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_572_838_000 picoseconds. 
+ Weight::from_parts(67_632_557, 3510) + // Standard Error: 12_028 + .saturating_add(Weight::from_parts(5_117_459, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1435,6 +1471,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1445,9 +1483,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(5_742_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_962_000 picoseconds. + Weight::from_parts(6_497_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -1455,6 +1493,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1465,9 +1505,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_227_000 picoseconds. + Weight::from_parts(5_496_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -1495,8 +1535,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 75_129_000 picoseconds. + Weight::from_parts(77_498_000, 6248) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1508,8 +1548,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. 
- Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 13_488_000 picoseconds. + Weight::from_parts(14_183_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1519,8 +1559,27 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_368_000 picoseconds. + Weight::from_parts(3_582_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `1047` + // Estimated: `4764` + // Minimum execution time: 44_876_000 picoseconds. + Weight::from_parts(46_353_000, 4764) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } } diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index e837956613edd2a30146bc6e41b4518d55c95095..613308c308e1f15eace901af1a192202f1aa8cad 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index 6827dbda962b3d4973133f38b723579ab1ddfd4d..92bc32191ab84d41e9dd18ef5f3863d2c5502edd 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-statement-store = { path = "../../primitives/statement-store", default-features = false } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index 
409104aeca111866c2d8023763ec4a2e47de8a3c..805f46a77f2eb0c3f7433a83659972a9d7da65c7 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -18,14 +18,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index d8b734e6bbef9f1c59cabed9b4cbea8d3dbaecc8..63b68e694307ecb4ee3986118f780df1ae49f609 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -38,7 +38,7 @@ //! In Substrate blockchains, pallets may contain dispatchable calls that can only be called at //! the system level of the chain (i.e. dispatchables that require a `Root` origin). //! Setting a privileged account, called the _sudo key_, allows you to make such calls as an -//! extrinisic. +//! extrinsic. //! //! Here's an example of a privileged function in another pallet: //! diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index be60068f1220b701e15f440b11b9775a5bcbaaf1..ecdd93826327798a76b36a54dc6ad0a2b0106c24 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } frame-metadata = { version = "16.0.0", default-features = false, features = [ @@ -58,7 +58,7 @@ k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } serde_json = { features = ["alloc"], workspace = true } -docify = "0.2.7" +docify = "0.2.8" static_assertions = "1.1.0" aquamarine = { version = "0.5.0" } diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index dd0688f2ad06a94332bda9f5401a5578aae04e28..9f8727f7adef58b68876dadc8907877c886d2d5d 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -derive-syn-parse = "0.1.5" +derive-syn-parse = "0.2.0" Inflector = "0.11.4" cfg-expr = "0.15.5" itertools = "0.10.3" diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index ea53ad263a1601815fbeb0c4fc05d8db2ea20148..0a62c3f92a6eeef38a84a6b2e915f1103d395fa1 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ 
b/substrate/frame/support/procedural/src/benchmark.rs @@ -249,7 +249,7 @@ impl BenchmarkCallDef { } } -/// Represents a parsed `#[benchmark]` or `#[instance_banchmark]` item. +/// Represents a parsed `#[benchmark]` or `#[instance_benchmark]` item. #[derive(Clone)] struct BenchmarkDef { params: Vec, @@ -466,7 +466,7 @@ pub fn benchmarks( let mod_vis = module.vis; let mod_name = module.ident; - // consume #[benchmarks] attribute by exclusing it from mod_attrs + // consume #[benchmarks] attribute by excluding it from mod_attrs let mod_attrs: Vec<&Attribute> = module .attrs .iter() diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 1937dfa9ca4d0e966aa1c46e1b240fe53dc2ab54..b083abbb2a8db65e1abd1f0cc2e4a15dbd922e2d 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -24,7 +24,7 @@ //! - Implicitly: `System: frame_system` //! - Explicitly: `System: frame_system::{Pallet, Call}` //! -//! The `construct_runtime` transitions from the implicit definition to the explict one. +//! The `construct_runtime` transitions from the implicit definition to the explicit one. //! From the explicit state, Substrate expands the pallets with additional information //! that is to be included in the runtime metadata. This expansion makes visible some extra //! parts of the pallets, mainly the `Error` if defined. The expanded state looks like @@ -55,7 +55,7 @@ //! +----------+ +------------------+ //! ``` //! -//! When all pallet parts are implcit, then the `construct_runtime!` macro expands to its final +//! When all pallet parts are implicit, then the `construct_runtime!` macro expands to its final //! state, the `ExplicitExpanded`. Otherwise, all implicit parts are converted to an explicit //! expanded part allow the `construct_runtime!` to expand any remaining explicit parts to an //! explicit expanded part. @@ -202,7 +202,7 @@ //! Similarly to the previous transition, the macro expansion transforms `System: //! frame_system::{Pallet, Call}` into `System: frame_system expanded::{Error} ::{Pallet, Call}`. //! The `expanded` section adds extra parts that the Substrate would like to expose for each pallet -//! by default. This is done to expose the approprite types for metadata construction. +//! by default. This is done to expose the appropriate types for metadata construction. //! //! This time, instead of calling `tt_default_parts` we are using the `tt_extra_parts` macro. //! This macro returns the ` :: expanded { Error }` list of additional parts we would like to diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 88f3f14dc86c541ef949beced9feaf222e9a3146..31866c787b0f339d6d83b810476e840216f70bbc 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -322,7 +322,7 @@ impl Parse for PalletDeclaration { /// A struct representing a path to a pallet. 
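(Since the `construct_runtime` docs fixed above lean on the implicit-versus-explicit distinction, here is a minimal sketch of the two declaration styles; the pallet names and the exact macro syntax for this SDK version are assumptions.)

construct_runtime!(
    pub enum Runtime {
        // Implicit: the macro queries the pallet, via `tt_default_parts`,
        // for which parts to expose.
        System: frame_system,
        // Explicit: parts are spelled out; the macro only appends the
        // `expanded::{Error}` extras in its final transition.
        Balances: pallet_balances::{Pallet, Call, Storage, Event<T>},
    }
);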
`PalletPath` is almost identical to the standard /// Rust path with a few restrictions: /// - No leading colons allowed -/// - Path segments can only consist of identifers separated by colons +/// - Path segments can only consist of identifiers separated by colons #[derive(Debug, Clone)] pub struct PalletPath { pub inner: Path, @@ -595,7 +595,7 @@ pub struct Pallet { pub is_expanded: bool, /// The name of the pallet, e.g.`System` in `System: frame_system`. pub name: Ident, - /// Either automatically infered, or defined (e.g. `MyPallet ... = 3,`). + /// Either automatically inferred, or defined (e.g. `MyPallet ... = 3,`). pub index: u8, /// The path of the pallet, e.g. `frame_system` in `System: frame_system`. pub path: PalletPath, @@ -634,7 +634,7 @@ impl Pallet { /// +----------+ +----------+ +------------------+ /// ``` enum PalletsConversion { - /// Pallets implicitely declare parts. + /// Pallets implicitly declare parts. /// /// `System: frame_system`. Implicit(Vec), @@ -648,7 +648,7 @@ enum PalletsConversion { /// Pallets explicitly declare parts that are fully expanded. /// /// This is the end state that contains extra parts included by - /// default by Subtrate. + /// default by Substrate. /// /// `System: frame_system expanded::{Error} ::{Pallet, Call}` /// @@ -660,7 +660,7 @@ enum PalletsConversion { /// /// Check if all pallet have explicit declaration of their parts, if so then assign index to each /// pallet using same rules as rust for fieldless enum. I.e. implicit are assigned number -/// incrementedly from last explicit or 0. +/// incrementally from last explicit or 0. fn convert_pallets(pallets: Vec) -> syn::Result { if pallets.iter().any(|pallet| pallet.pallet_parts.is_none()) { return Ok(PalletsConversion::Implicit(pallets)) diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index 8740ccd401abedcbcb27e19c77ef3e885207342a..54755f1163a1de279cad99b16f9500ccb39956eb 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -279,7 +279,7 @@ fn test_runtime_type_with_doc() { } #[test] -fn test_disambugation_path() { +fn test_disambiguation_path() { let foreign_impl: ItemImpl = parse_quote!(impl SomeTrait for SomeType {}); let default_impl_path: Path = parse_quote!(SomeScope::SomeType); diff --git a/substrate/frame/support/procedural/src/dynamic_params.rs b/substrate/frame/support/procedural/src/dynamic_params.rs index b718ccbc9558480587a1f50f4e85a992e5c750da..29399a885bc671ddfeefe3d8eefba3d235ad4899 100644 --- a/substrate/frame/support/procedural/src/dynamic_params.rs +++ b/substrate/frame/support/procedural/src/dynamic_params.rs @@ -147,8 +147,8 @@ fn ensure_codec_index(attrs: &Vec, span: Span) -> Result<()> { /// Used to inject arguments into the inner `#[dynamic_pallet_params(..)]` attribute. /// -/// This allows the outer `#[dynamic_params(..)]` attribute to specify some arguments that dont need -/// to be repeated every time. +/// This allows the outer `#[dynamic_params(..)]` attribute to specify some arguments that don't +/// need to be repeated every time. 
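(For context on the `MacroInjectArgs` fields just below, where `runtime_params` is an ident and `params_pallet` a type: this is roughly how the two attributes combine at a call site. Module, pallet, and parameter names here are illustrative assumptions.)

#[dynamic_params(RuntimeParameters, pallet_parameters::Parameters::<Runtime>)]
pub mod dynamic_params {
    use super::*;

    #[dynamic_pallet_params]
    #[codec(index = 0)] // explicit indices are enforced by `ensure_codec_index`
    pub mod storage {
        /// An illustrative dynamically adjustable parameter.
        #[codec(index = 0)]
        pub static BaseDeposit: u128 = 1_000;
    }
}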
struct MacroInjectArgs { runtime_params: syn::Ident, params_pallet: syn::Type, @@ -311,7 +311,7 @@ impl ToTokens for DynamicPalletParamAttr { )* } - impl #scrate::traits::dynamic_params::AggregratedKeyValue for Parameters { + impl #scrate::traits::dynamic_params::AggregatedKeyValue for Parameters { type Key = #key_ident; type Value = #value_ident; @@ -497,7 +497,7 @@ impl ToTokens for DynamicParamAggregatedEnum { #vis enum #params_key_ident { #( #(#attributes)* - #param_names(<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key), + #param_names(<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key), )* } @@ -515,11 +515,11 @@ impl ToTokens for DynamicParamAggregatedEnum { #vis enum #params_value_ident { #( #(#attributes)* - #param_names(<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Value), + #param_names(<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Value), )* } - impl #scrate::traits::dynamic_params::AggregratedKeyValue for #name { + impl #scrate::traits::dynamic_params::AggregatedKeyValue for #name { type Key = #params_key_ident; type Value = #params_value_ident; @@ -536,13 +536,13 @@ impl ToTokens for DynamicParamAggregatedEnum { } #( - impl ::core::convert::From<<#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key> for #params_key_ident { - fn from(key: <#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Key) -> Self { + impl ::core::convert::From<<#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key> for #params_key_ident { + fn from(key: <#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Key) -> Self { #params_key_ident::#param_names(key) } } - impl ::core::convert::TryFrom<#params_value_ident> for <#param_types as #scrate::traits::dynamic_params::AggregratedKeyValue>::Value { + impl ::core::convert::TryFrom<#params_value_ident> for <#param_types as #scrate::traits::dynamic_params::AggregatedKeyValue>::Value { type Error = (); fn try_from(value: #params_value_ident) -> Result { diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index dee6d522d25ce90fe7ab254147fd31dc46ed3f3d..bc62c0509b05b2c8cc1f2bf518c58748baaea0de 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -137,7 +137,7 @@ fn counter_prefix(prefix: &str) -> String { /// - `Call` - If the pallet has callable functions /// - `Storage` - If the pallet uses storage /// - `Event` or `Event` (if the event is generic) - If the pallet emits events -/// - `Origin` or `Origin` (if the origin is generic) - If the pallet has instanciable origins +/// - `Origin` or `Origin` (if the origin is generic) - If the pallet has instantiable origins /// - `Config` or `Config` (if the config is generic) - If the pallet builds the genesis /// storage with `GenesisConfig` /// - `Inherent` - If the pallet provides/can check inherents. @@ -166,7 +166,7 @@ fn counter_prefix(prefix: &str) -> String { /// and `Event` are encoded, and to define the ModuleToIndex value. /// /// if `= $n` is not given, then index is resolved in the same way as fieldless enum in Rust -/// (i.e. incrementedly from previous index): +/// (i.e. incrementally from previous index): /// ```nocompile /// pallet1 .. 
= 2, /// pallet2 .., // Here pallet2 is given index 3 @@ -460,7 +460,7 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream } /// This attribute can be used to derive a full implementation of a trait based on a local partial -/// impl and an external impl containing defaults that can be overriden in the local impl. +/// impl and an external impl containing defaults that can be overridden in the local impl. /// /// For a full end-to-end example, see [below](#use-case-auto-derive-test-pallet-config-traits). /// diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index b55f130a93a7ee7eef377678c4fc1ea664644601..6e12774611ddfd0b4cdf53468df074dc7315e382 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -321,7 +321,7 @@ impl Def { Ok(()) } - /// Tries to locate a manual tasks impl (an impl impling a trait whose last path segment is + /// Tries to locate a manual tasks impl (an impl implementing a trait whose last path segment is /// `Task`) in the event that one has not been found already via the attribute macro pub fn resolve_manual_tasks_impl( tasks: &mut Option, diff --git a/substrate/frame/support/src/dispatch_context.rs b/substrate/frame/support/src/dispatch_context.rs index 608187b72206399913e337a5fa9f5c1e42277978..254302c8f14d2db1987353155ab7cacde65a5bb3 100644 --- a/substrate/frame/support/src/dispatch_context.rs +++ b/substrate/frame/support/src/dispatch_context.rs @@ -18,7 +18,7 @@ //! Provides functions to interact with the dispatch context. //! //! A Dispatch context is created by calling [`run_in_context`] and then the given closure will be -//! executed in this dispatch context. Everyting run in this `closure` will have access to the same +//! executed in this dispatch context. Everything run in this `closure` will have access to the same //! dispatch context. This also applies to nested calls of [`run_in_context`]. The dispatch context //! can be used to store and retrieve information locally in this context. The dispatch context can //! be accessed by using [`with_context`]. This function will execute the given closure and give it @@ -51,7 +51,7 @@ //! //! run_in_context(|| { //! with_context::(|v| { -//! // Intitialize the value to the default value. +//! // Initialize the value to the default value. //! assert_eq!(0, v.or_default().0); //! v.or_default().0 = 10; //! }); diff --git a/substrate/frame/support/src/instances.rs b/substrate/frame/support/src/instances.rs index 396018d5cbd5289db9c7c6e89d861adee4b2df9a..ecb356af50b6fd2dded7bd4d7a5536f2ce7bf069 100644 --- a/substrate/frame/support/src/instances.rs +++ b/substrate/frame/support/src/instances.rs @@ -31,83 +31,83 @@ //! NOTE: [`frame_support::pallet`] will reexport them inside the module, in order to make them //! accessible to [`frame_support::construct_runtime`]. -/// `Instance1` to be used for instantiable palllets defined with the +/// `Instance1` to be used for instantiable pallets defined with the /// [`#[pallet]`](`frame_support::pallet`) macro. Instances 2-16 are also available but are hidden /// from docs. #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance1; -/// `Instance2` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance2` to be used for instantiable pallets defined with the `#[pallet]` macro. 
#[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance2; -/// `Instance3` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance3` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance3; -/// `Instance4` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance4` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance4; -/// `Instance5` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance5` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance5; -/// `Instance6` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance6` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance6; -/// `Instance7` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance7` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance7; -/// `Instance8` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance8` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance8; -/// `Instance9` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance9` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance9; -/// `Instance10` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance10` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance10; -/// `Instance11` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance11` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance11; -/// `Instance12` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance12` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance12; -/// `Instance13` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance13` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance13; -/// `Instance14` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance14` to be used for instantiable pallets defined with the `#[pallet]` macro. 
#[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance14; -/// `Instance15` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance15` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance15; -/// `Instance16` to be used for instantiable palllets defined with the `#[pallet]` macro. +/// `Instance16` to be used for instantiable pallets defined with the `#[pallet]` macro. #[doc(hidden)] #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance16; diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index c524f8953a46a5e768b3dc538557975c1d6e1c75..895215d364e3dfccad33a805c6db34857c014ae8 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -145,7 +145,7 @@ impl TypeId for PalletId { /// # Examples /// /// There are different ways to declare the `prefix` to use. The `prefix` type can either be -/// declared explicetly by passing it to the macro as an attribute or by letting the macro +/// declared explicitly by passing it to the macro as an attribute or by letting the macro /// guess on what the `prefix` type is. The `prefix` is always passed as the first generic /// argument to the type declaration. When using [`#[pallet::storage]`](pallet_macros::storage) /// this first generic argument is always `_`. Besides declaring the `prefix`, the rest of the @@ -1110,7 +1110,7 @@ pub mod pallet_macros { /// Declares a storage as unbounded in potential size. /// - /// When implementating the storage info (when `#[pallet::generate_storage_info]` is + /// When implementing the storage info (when `#[pallet::generate_storage_info]` is /// specified on the pallet struct placeholder), the size of the storage will be declared /// as unbounded. This can be useful for storage which can never go into PoV (Proof of /// Validity). @@ -2340,7 +2340,7 @@ pub mod pallet_macros { /// Allows defining conditions for a task to run. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a /// given work item to be valid. /// @@ -2351,7 +2351,7 @@ pub mod pallet_macros { /// Allows defining an index for a task. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given /// work item. /// @@ -2361,7 +2361,7 @@ pub mod pallet_macros { /// Allows defining an iterator over available work items for a task. /// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`). /// /// It takes an iterator as input that yields a tuple with same types as the function @@ -2370,7 +2370,7 @@ pub mod pallet_macros { /// Allows defining the weight of a task. 
/// - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) define the weight of a given work /// item. /// diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 2ceab44cb16bd4bcf356caa07a3506aa76ba79db..b8cbcd69048147e7380357aa0b1adcdd4d03a0bc 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -41,19 +41,19 @@ use sp_std::{marker::PhantomData, vec::Vec}; /// It takes 5 type parameters: /// - `From`: The version being upgraded from. /// - `To`: The version being upgraded to. -/// - `Inner`: An implementation of `OnRuntimeUpgrade`. +/// - `Inner`: An implementation of `UncheckedOnRuntimeUpgrade`. /// - `Pallet`: The Pallet being upgraded. /// - `Weight`: The runtime's RuntimeDbWeight implementation. /// /// When a [`VersionedMigration`] `on_runtime_upgrade`, `pre_upgrade`, or `post_upgrade` method is /// called, the on-chain version of the pallet is compared to `From`. If they match, the `Inner` -/// equivalent is called and the pallets on-chain version is set to `To` after the migration. -/// Otherwise, a warning is logged notifying the developer that the upgrade was a noop and should -/// probably be removed. +/// `UncheckedOnRuntimeUpgrade` is called and the pallet's on-chain version is set to `To` +/// after the migration. Otherwise, a warning is logged notifying the developer that the upgrade was +/// a noop and should probably be removed. /// -/// It is STRONGLY RECOMMENDED to write the unversioned migration logic in a private module and -/// only export the versioned migration logic to prevent accidentally using the unversioned -/// migration in any runtimes. +/// By not bounding `Inner` with `OnRuntimeUpgrade`, we prevent developers from +/// accidentally using the unchecked version of the migration in a runtime upgrade instead of +/// [`VersionedMigration`]. /// /// ### Examples /// ```ignore @@ -71,9 +71,9 @@ use sp_std::{marker::PhantomData, vec::Vec}; /// /// - https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 /// mod version_unchecked { /// use super::*; -/// pub struct MigrateV5ToV6<T>(sp_std::marker::PhantomData<T>); -/// impl<T> OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6<T> { -/// // OnRuntimeUpgrade implementation... +/// pub struct VersionUncheckedMigrateV5ToV6<T>(sp_std::marker::PhantomData<T>); +/// impl<T> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6<T> { +/// // `UncheckedOnRuntimeUpgrade` implementation... /// } /// } /// @@ -116,7 +116,7 @@ pub enum VersionedPostUpgradeData { impl< const FROM: u16, const TO: u16, - Inner: crate::traits::OnRuntimeUpgrade, + Inner: crate::traits::UncheckedOnRuntimeUpgrade, Pallet: GetStorageVersion + PalletInfoAccess, DbWeight: Get<RuntimeDbWeight>, > crate::traits::OnRuntimeUpgrade for VersionedMigration<FROM, TO, Inner, Pallet, DbWeight> diff --git a/substrate/frame/support/src/storage/migration.rs b/substrate/frame/support/src/storage/migration.rs index 568c475bdc69d6adc9f59bc0b059b48987bd04e3..252625cf4f7d02aa82418ca50b45334c53692766 100644 --- a/substrate/frame/support/src/storage/migration.rs +++ b/substrate/frame/support/src/storage/migration.rs @@ -303,11 +303,11 @@ pub fn take_storage_item /// Move a storage from a pallet prefix to another pallet prefix. /// /// Keys used in pallet storages always start with: -/// `concat(twox_128(pallet_name), towx_128(storage_name))`.
+/// `concat(twox_128(pallet_name), twox_128(storage_name))`. /// /// This function will remove all value for which the key start with -/// `concat(twox_128(old_pallet_name), towx_128(storage_name))` and insert them at the key with -/// the start replaced by `concat(twox_128(new_pallet_name), towx_128(storage_name))`. +/// `concat(twox_128(old_pallet_name), twox_128(storage_name))` and insert them at the key with +/// the start replaced by `concat(twox_128(new_pallet_name), twox_128(storage_name))`. /// /// # Example /// @@ -339,7 +339,7 @@ pub fn move_storage_from_pallet( /// Move all storages from a pallet prefix to another pallet prefix. /// /// Keys used in pallet storages always start with: -/// `concat(twox_128(pallet_name), towx_128(storage_name))`. +/// `concat(twox_128(pallet_name), twox_128(storage_name))`. /// /// This function will remove all value for which the key start with `twox_128(old_pallet_name)` /// and insert them at the key with the start replaced by `twox_128(new_pallet_name)`. diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index 8ebe7b31da80d3511b4a0003637d1f45b52d5b11..f7d7447482d06ccd64e59c3b8e49aec18696fdee 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -159,7 +159,7 @@ pub trait StorageValue { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len() -> Option where @@ -363,7 +363,7 @@ pub trait StorageMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len>(key: KeyArg) -> Option where @@ -381,7 +381,8 @@ pub trait StorageMap { /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This @@ -410,7 +411,7 @@ pub trait StorageMap { pub trait IterableStorageMap: StorageMap { /// The type that iterates over all `(key, value)`. type Iterator: Iterator; - /// The type that itereates over all `key`s. + /// The type that iterates over all `key`s. type KeyIterator: Iterator; /// Enumerate all elements in the map in lexicographical order of the encoded key. If you @@ -777,7 +778,7 @@ pub trait StorageDoubleMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len(key1: KArg1, key2: KArg2) -> Option where @@ -798,7 +799,7 @@ pub trait StorageDoubleMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. 
fn decode_non_dedup_len(key1: KArg1, key2: KArg2) -> Option where @@ -980,7 +981,7 @@ pub trait StorageNMap { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. fn decode_len + TupleToEncodedIter>(key: KArg) -> Option where @@ -1488,8 +1489,8 @@ pub trait StorageDecodeLength: private::Sealed + codec::DecodeLength { } } -/// It is expected that the length is at the beginning of the encoded objectand that the length is a -/// `Compact`. +/// It is expected that the length is at the beginning of the encoded object and that the length is +/// a `Compact`. /// /// # Note /// The length returned by this trait is not deduplicated, i.e. it is the length of the underlying @@ -1790,7 +1791,7 @@ mod test { }); } - // This test ensures that the Digest encoding does not change without being noticied. + // This test ensures that the Digest encoding does not change without being noticed. #[test] fn digest_storage_append_works_as_expected() { TestExternalities::default().execute_with(|| { diff --git a/substrate/frame/support/src/storage/stream_iter.rs b/substrate/frame/support/src/storage/stream_iter.rs index 2205601938b880f88bcdbc20680e7d6254badc8b..529b2f387c71cc86b52f648f8630bd9f2339dd69 100644 --- a/substrate/frame/support/src/storage/stream_iter.rs +++ b/substrate/frame/support/src/storage/stream_iter.rs @@ -217,7 +217,7 @@ const STORAGE_INPUT_BUFFER_CAPACITY: usize = 2 * 1024; /// Implementation of [`codec::Input`] using [`sp_io::storage::read`]. /// /// Keeps an internal buffer with a size of [`STORAGE_INPUT_BUFFER_CAPACITY`]. All read accesses -/// are tried to be served by this buffer. If the buffer doesn't hold enough bytes to fullfill the +/// are tried to be served by this buffer. If the buffer doesn't hold enough bytes to fulfill the /// current read access, the buffer is re-filled from the state. A read request that is bigger than /// the internal buffer is directly forwarded to the state to reduce the number of reads from the /// state. diff --git a/substrate/frame/support/src/storage/types/counted_map.rs b/substrate/frame/support/src/storage/types/counted_map.rs index 04e69751c16a00e4b89fdf5907bec5be64305014..0444e269928ab68e6da1ebada37425d77bdcc288 100644 --- a/substrate/frame/support/src/storage/types/counted_map.rs +++ b/substrate/frame/support/src/storage/types/counted_map.rs @@ -310,7 +310,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len>(key: KeyArg) -> Option where @@ -775,13 +775,13 @@ mod test { assert_eq!(A::try_get(1), Err(())); assert_eq!(A::count(), 3); - // Take exsisting. + // Take existing. assert_eq!(A::take(4), 10); assert_eq!(A::try_get(4), Err(())); assert_eq!(A::count(), 2); - // Take non-exsisting. + // Take non-existing. assert_eq!(A::take(4), ADefault::get()); assert_eq!(A::try_get(4), Err(())); @@ -1022,13 +1022,13 @@ mod test { assert_eq!(B::try_get(1), Err(())); assert_eq!(B::count(), 3); - // Take exsisting. + // Take existing. assert_eq!(B::take(4), Some(10)); assert_eq!(B::try_get(4), Err(())); assert_eq!(B::count(), 2); - // Take non-exsisting. + // Take non-existing. 
assert_eq!(B::take(4), None); assert_eq!(B::try_get(4), Err(())); diff --git a/substrate/frame/support/src/storage/types/counted_nmap.rs b/substrate/frame/support/src/storage/types/counted_nmap.rs index 279894ee97363b38462f83e011f8fee9f0fc233b..51cde93f28c01d8dab45da49fddb0b5eeb1c54d1 100644 --- a/substrate/frame/support/src/storage/types/counted_nmap.rs +++ b/substrate/frame/support/src/storage/types/counted_nmap.rs @@ -378,7 +378,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len + TupleToEncodedIter>( key: KArg, diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index cb9479d491cff7206eb36996097e231439fd1769..2a7af7a984633ebcb79572e30595359758b50154 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -445,7 +445,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len(key1: KArg1, key2: KArg2) -> Option where @@ -465,7 +465,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. /// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/storage/types/map.rs b/substrate/frame/support/src/storage/types/map.rs index ee5db74583b03f8646553d7fc8c1c75042e7f68a..b79a6ae9b8482090c3f3ebe42b7b23aa8824b657 100644 --- a/substrate/frame/support/src/storage/types/map.rs +++ b/substrate/frame/support/src/storage/types/map.rs @@ -277,7 +277,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len>(key: KeyArg) -> Option where @@ -295,7 +295,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. 
/// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/storage/types/mod.rs b/substrate/frame/support/src/storage/types/mod.rs index 9dd6f4066e432068b97899e94017bb06fe0dc014..631410f425d17a169f16cd0380a3ffa3f22f6611 100644 --- a/substrate/frame/support/src/storage/types/mod.rs +++ b/substrate/frame/support/src/storage/types/mod.rs @@ -195,7 +195,7 @@ mod test { // result query returns error assert_eq!(C::get(), Err(())); - // value query with custom onempty returns 42 + // value query with custom on empty returns 42 assert_eq!(D::get(), 42); }); } diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs index 0723db68900273ff729e8d7cceb4e408e18a00e3..253f02a14f0796f7ecba7e81af8e5be7b7ec0e42 100755 --- a/substrate/frame/support/src/storage/types/nmap.rs +++ b/substrate/frame/support/src/storage/types/nmap.rs @@ -348,7 +348,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len + TupleToEncodedIter>( key: KArg, diff --git a/substrate/frame/support/src/storage/types/value.rs b/substrate/frame/support/src/storage/types/value.rs index 263091dd25237da759c54a0a7668306e5850aec9..a2d93a6a165ffca205f1597dfa0952c9c4900313 100644 --- a/substrate/frame/support/src/storage/types/value.rs +++ b/substrate/frame/support/src/storage/types/value.rs @@ -225,7 +225,7 @@ where /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completly + /// `None` does not mean that `get()` does not return a value. The default value is completely /// ignored by this function. pub fn decode_len() -> Option where @@ -243,7 +243,8 @@ where /// /// # Warning /// - /// - `None` does not mean that `get()` does not return a value. The default value is completly + /// - `None` does not mean that `get()` does not return a value. The default value is + /// completely /// ignored by this function. 
/// /// - The value returned is the non-deduplicated length of the underlying Vector in storage.This diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 1997d8fc223efe7ff1d1be17e2bc10fb973d2cb9..66777cef7b8e81960c1169c2fb1fd1033e58c4a2 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -22,8 +22,8 @@ pub mod tokens; pub use tokens::{ currency::{ - ActiveIssuanceOf, Currency, LockIdentifier, LockableCurrency, NamedReservableCurrency, - ReservableCurrency, TotalIssuanceOf, VestingSchedule, + ActiveIssuanceOf, Currency, InspectLockableCurrency, LockIdentifier, LockableCurrency, + NamedReservableCurrency, ReservableCurrency, TotalIssuanceOf, VestingSchedule, }, fungible, fungibles, imbalance::{Imbalance, OnUnbalanced, SignedImbalance}, @@ -87,7 +87,7 @@ pub use hooks::GenesisBuild; pub use hooks::{ BeforeAllRuntimeMigrations, BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, OnTimestampSet, PostInherents, - PostTransactions, PreInherents, + PostTransactions, PreInherents, UncheckedOnRuntimeUpgrade, }; pub mod schedule; diff --git a/substrate/frame/support/src/traits/dispatch.rs b/substrate/frame/support/src/traits/dispatch.rs index de50ce7a26c211190ddcb81976d39b9779540756..7dc8d3e4f5a6e954c5eb726b257f1d2d73a29dc6 100644 --- a/substrate/frame/support/src/traits/dispatch.rs +++ b/substrate/frame/support/src/traits/dispatch.rs @@ -540,7 +540,7 @@ pub trait OriginTrait: Sized { }) } - /// Extract a reference to the sytsem origin, if that's what the caller is. + /// Extract a reference to the system origin, if that's what the caller is. fn as_system_ref(&self) -> Option<&RawOrigin> { self.caller().as_system_ref() } diff --git a/substrate/frame/support/src/traits/dynamic_params.rs b/substrate/frame/support/src/traits/dynamic_params.rs index 8881df04141cc13a97f79bc6df3ed41b8db0323d..32dae6799eaf7c9061851896ada9fe4e8a1a3e0d 100644 --- a/substrate/frame/support/src/traits/dynamic_params.rs +++ b/substrate/frame/support/src/traits/dynamic_params.rs @@ -25,30 +25,30 @@ use frame_support::Parameter; /// A dynamic parameter store across an aggregated KV type. pub trait RuntimeParameterStore { - type AggregratedKeyValue: AggregratedKeyValue; + type AggregatedKeyValue: AggregatedKeyValue; /// Get the value of a parametrized key. /// /// Should return `None` if no explicit value was set instead of a default. fn get(key: K) -> Option where - KV: AggregratedKeyValue, - K: Key + Into<::Key>, - ::Key: IntoKey< - <::AggregratedKeyValue as AggregratedKeyValue>::Key, + KV: AggregatedKeyValue, + K: Key + Into<::Key>, + ::Key: IntoKey< + <::AggregatedKeyValue as AggregatedKeyValue>::Key, >, - <::AggregratedKeyValue as AggregratedKeyValue>::Value: - TryIntoKey<::Value>, - ::Value: TryInto; + <::AggregatedKeyValue as AggregatedKeyValue>::Value: + TryIntoKey<::Value>, + ::Value: TryInto; } /// A dynamic parameter store across a concrete KV type. -pub trait ParameterStore { +pub trait ParameterStore { /// Get the value of a parametrized key. fn get(key: K) -> Option where - K: Key + Into<::Key>, - ::Value: TryInto; + K: Key + Into<::Key>, + ::Value: TryInto; } /// Key of a dynamic parameter. @@ -61,7 +61,7 @@ pub trait Key { } /// The aggregated key-value type of a dynamic parameter store. -pub trait AggregratedKeyValue: Parameter { +pub trait AggregatedKeyValue: Parameter { /// The aggregated key type. 
type Key: Parameter + MaxEncodedLen; @@ -72,7 +72,7 @@ pub trait AggregratedKeyValue: Parameter { fn into_parts(self) -> (Self::Key, Option<Self::Value>); } -impl AggregratedKeyValue for () { +impl AggregatedKeyValue for () { type Key = (); type Value = (); @@ -90,17 +90,17 @@ pub struct ParameterStoreAdapter(sp_std::marker::PhantomData<(PS, KV)>); impl ParameterStore for ParameterStoreAdapter where PS: RuntimeParameterStore, - KV: AggregratedKeyValue, - ::Key: - IntoKey<<::AggregratedKeyValue as AggregratedKeyValue>::Key>, - ::Value: TryFromKey< - <::AggregratedKeyValue as AggregratedKeyValue>::Value, + KV: AggregatedKeyValue, + ::Key: + IntoKey<<::AggregatedKeyValue as AggregatedKeyValue>::Key>, + ::Value: TryFromKey< + <::AggregatedKeyValue as AggregatedKeyValue>::Value, >, { fn get(key: K) -> Option where - K: Key + Into<::Key>, - ::Value: TryInto, + K: Key + Into<::Key>, + ::Value: TryInto, { PS::get::(key) } diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index 7d0e5aa1e89ca1d76e81d526881041c09c6e6cf3..ccccc5063286602538c0c46293ffabe8ad01181a 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -167,7 +167,7 @@ pub trait OnGenesis { /// /// This hook is intended to be used internally in FRAME and not be exposed to FRAME developers. /// -/// It is defined as a seperate trait from [`OnRuntimeUpgrade`] precisely to not pollute the public +/// It is defined as a separate trait from [`OnRuntimeUpgrade`] precisely to not pollute the public /// API. pub trait BeforeAllRuntimeMigrations { /// Something that should happen before runtime migrations are executed. @@ -227,6 +227,30 @@ pub trait OnRuntimeUpgrade { } } +/// This trait is intended for use within `VersionedMigration` to execute storage migrations without +/// automatic version checks. Implementations should ensure migration logic is safe and idempotent. +pub trait UncheckedOnRuntimeUpgrade { + /// Called within `VersionedMigration` to execute the actual migration. It is also + /// expected that no version checks are performed within this function. + /// + /// See also [`Hooks::on_runtime_upgrade`]. + fn on_runtime_upgrade() -> Weight { + Weight::zero() + } + + /// See [`Hooks::pre_upgrade`]. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> { + Ok(Vec::new()) + } + + /// See [`Hooks::post_upgrade`]. + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> { + Ok(()) + } +} + #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] @@ -459,7 +483,9 @@ pub trait Hooks { /// ## Implementation Note: Standalone Migrations /// /// Additional migrations can be created by directly implementing [`OnRuntimeUpgrade`] on - /// structs and passing them to `Executive`. + /// structs and passing them to `Executive`. Alternatively, implement + /// [`UncheckedOnRuntimeUpgrade`] and pass it to [`crate::migrations::VersionedMigration`], + /// which already implements [`OnRuntimeUpgrade`].
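To make the note above concrete, here is a hedged sketch of the pattern the new trait enables, following the private-module example from the `migrations.rs` docs in this diff; `Config`, `Pallet`, and the version numbers are placeholder assumptions:

```rust
use frame_support::{
	migrations::VersionedMigration, traits::UncheckedOnRuntimeUpgrade, weights::Weight,
};

mod version_unchecked {
	use super::*;

	/// Raw migration logic, deliberately kept private to this module.
	pub struct MigrateV5ToV6<T>(core::marker::PhantomData<T>);

	impl<T: crate::Config> UncheckedOnRuntimeUpgrade for MigrateV5ToV6<T> {
		fn on_runtime_upgrade() -> Weight {
			// Translate storage from the v5 layout to the v6 layout here.
			Weight::zero()
		}
	}
}

/// The only exported item: the version-checked wrapper to hand to `Executive`.
pub type MigrateV5ToV6<T> = VersionedMigration<
	5,
	6,
	version_unchecked::MigrateV5ToV6<T>,
	crate::Pallet<T>,
	<T as frame_system::Config>::DbWeight,
>;
```

Because `VersionedMigration` only accepts an `UncheckedOnRuntimeUpgrade` as its `Inner`, the raw logic cannot be handed to `Executive` directly by mistake.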
/// /// ## Implementation Note: Pallet Versioning /// diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 1f634a642829c1d6a3e72e0e76ef25bdf2bef55a..bc7407a7be6248f25866a78f3b95bb0107ff19ae 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -1424,7 +1424,7 @@ mod test { assert_eq!(>::max_encoded_len(), 2usize.pow(14) + 4); let data = 4u64; - // Ensure that we check that the `Vec` is consumed completly on decode. + // Ensure that we check that the `Vec` is consumed completely on decode. assert!(WrapperOpaque::<u64>::decode(&mut &data.encode().encode()[..]).is_err()); } diff --git a/substrate/frame/support/src/traits/schedule.rs b/substrate/frame/support/src/traits/schedule.rs index 7a7d1357da1e7dba9588866d07d734f2a8fedda8..f41c73fe69a8883d8d2ee8bb7ffcdf72bc2e5ba2 100644 --- a/substrate/frame/support/src/traits/schedule.rs +++ b/substrate/frame/support/src/traits/schedule.rs @@ -130,7 +130,7 @@ impl MaybeHashed { } } -// TODO: deprecate +#[deprecated(note = "Use `v3` instead. Will be removed after September 2024.")] pub mod v1 { use super::*; @@ -218,10 +218,12 @@ pub mod v1 { fn next_dispatch_time(id: Vec<u8>) -> Result; } + #[allow(deprecated)] impl Anon for T where T: v2::Anon, { + #[allow(deprecated)] type Address = T::Address; fn schedule( @@ -232,10 +234,13 @@ pub mod v1 { call: Call, ) -> Result { let c = MaybeHashed::::Value(call); + + #[allow(deprecated)] T::schedule(when, maybe_periodic, priority, origin, c) } fn cancel(address: Self::Address) -> Result<(), ()> { + #[allow(deprecated)] T::cancel(address) } @@ -243,18 +248,22 @@ pub mod v1 { address: Self::Address, when: DispatchTime, ) -> Result { + #[allow(deprecated)] T::reschedule(address, when) } fn next_dispatch_time(address: Self::Address) -> Result { + #[allow(deprecated)] T::next_dispatch_time(address) } } + #[allow(deprecated)] impl Named for T where T: v2::Named, { + #[allow(deprecated)] type Address = T::Address; fn schedule_named( @@ -266,10 +275,12 @@ pub mod v1 { call: Call, ) -> Result { let c = MaybeHashed::::Value(call); + #[allow(deprecated)] T::schedule_named(id, when, maybe_periodic, priority, origin, c) } fn cancel_named(id: Vec<u8>) -> Result<(), ()> { + #[allow(deprecated)] T::cancel_named(id) } @@ -277,16 +288,18 @@ pub mod v1 { id: Vec<u8>, when: DispatchTime, ) -> Result { + #[allow(deprecated)] T::reschedule_named(id, when) } fn next_dispatch_time(id: Vec<u8>) -> Result { + #[allow(deprecated)] T::next_dispatch_time(id) } } } -// TODO: deprecate +#[deprecated(note = "Use `v3` instead. Will be removed after September 2024.")] pub mod v2 { use super::*; @@ -478,4 +491,5 @@ pub mod v3 { } } +#[allow(deprecated)] pub use v1::*; diff --git a/substrate/frame/support/src/traits/tokens/currency.rs b/substrate/frame/support/src/traits/tokens/currency.rs index 8b773115011de27b9920de70860491ec404a54e1..b3db4c98001d900522b33c6a25e3e9b9119c3ea5 100644 --- a/substrate/frame/support/src/traits/tokens/currency.rs +++ b/substrate/frame/support/src/traits/tokens/currency.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The Currency trait and associated types. +//! +//! Note that `Currency` and related traits are deprecated; the +//! [`fungible`](frame_support::traits::fungible) traits should be used instead.
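Since the deprecation note above may prompt migrations in downstream pallets, a small hedged sketch of the replacement pattern follows. `Config`, `NativeBalance`, and `BalanceOf` are assumed names for a pallet's configuration trait and its fungible implementation, not items from this diff:

```rust
use frame_support::traits::{
	fungible::{Inspect, Mutate},
	tokens::Preservation,
};

// Assumed alias: the balance type of the configured fungible implementation.
type BalanceOf<T> = <<T as Config>::NativeBalance as Inspect<
	<T as frame_system::Config>::AccountId,
>>::Balance;

fn pay<T: Config>(
	from: &T::AccountId,
	to: &T::AccountId,
	amount: BalanceOf<T>,
) -> frame_support::dispatch::DispatchResult {
	// `Preservation::Expendable` plays the role of the old
	// `ExistenceRequirement::AllowDeath` from the deprecated `Currency` trait.
	T::NativeBalance::transfer(from, to, amount, Preservation::Expendable)?;
	Ok(())
}
```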
use super::{ imbalance::{Imbalance, SignedImbalance}, @@ -27,7 +30,7 @@ use sp_runtime::{traits::MaybeSerializeDeserialize, DispatchError}; mod reservable; pub use reservable::{NamedReservableCurrency, ReservableCurrency}; mod lockable; -pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule}; +pub use lockable::{InspectLockableCurrency, LockIdentifier, LockableCurrency, VestingSchedule}; /// Abstraction over a fungible assets system. pub trait Currency { diff --git a/substrate/frame/support/src/traits/tokens/currency/lockable.rs b/substrate/frame/support/src/traits/tokens/currency/lockable.rs index 955814f5aa9de3e8767cc9632e6a23802ed0e94f..51a48dd15ce85c8f41393164f42ca347d0114d1d 100644 --- a/substrate/frame/support/src/traits/tokens/currency/lockable.rs +++ b/substrate/frame/support/src/traits/tokens/currency/lockable.rs @@ -64,6 +64,12 @@ pub trait LockableCurrency: Currency { fn remove_lock(id: LockIdentifier, who: &AccountId); } +/// An inspect interface for a currency whose accounts can have liquidity restrictions. +pub trait InspectLockableCurrency: LockableCurrency { + /// Amount of funds locked for `who` associated with `id`. + fn balance_locked(id: LockIdentifier, who: &AccountId) -> Self::Balance; +} + /// A vesting schedule over a currency. This allows a particular currency to have vesting limits /// applied to it. pub trait VestingSchedule { diff --git a/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs b/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs index e7fcc15472e05b0600f5bfda2f6a1d524a8afbc3..a522073689879385941cfaeebe25675d3b253127 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/conformance_tests/regular/unbalanced.rs @@ -94,7 +94,7 @@ where ); assert_eq!(T::balance(&account_0), account_0_initial_balance - amount); - // Decreasing the balance below funds avalibale should fail when Precision::Exact + // Decreasing the balance below funds available should fail when Precision::Exact let balance_before = T::balance(&account_0); assert_eq!( T::decrease_balance( diff --git a/substrate/frame/support/src/traits/tokens/fungible/freeze.rs b/substrate/frame/support/src/traits/tokens/fungible/freeze.rs index 8b542ee4c6060e143ee58902a38e39cca38b109b..96efbc6ab89e154a57d88d75c65ab8c330af41eb 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/freeze.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/freeze.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The traits for putting freezes within a single fungible token class. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits +//! including the place of the Freezes in FRAME. use scale_info::TypeInfo; use sp_arithmetic::{ @@ -35,7 +38,7 @@ pub trait Inspect: super::Inspect { /// An identifier for a freeze. type Id: codec::Encode + TypeInfo + 'static; - /// Amount of funds held in reserve by `who` for the given `id`. + /// Amount of funds frozen in reserve by `who` for the given `id`. fn balance_frozen(id: &Self::Id, who: &AccountId) -> Self::Balance; /// The amount of the balance which can become frozen. Defaults to `total_balance()`. @@ -45,11 +48,11 @@ pub trait Inspect: super::Inspect { /// Returns `true` if it's possible to introduce a freeze for the given `id` onto the /// account of `who`.
This will be true as long as the implementor supports as many - /// concurrent freeze locks as there are possible values of `id`. + /// concurrent freezes as there are possible values of `id`. fn can_freeze(id: &Self::Id, who: &AccountId) -> bool; } -/// Trait for introducing, altering and removing locks to freeze an account's funds so they never +/// Trait for introducing, altering and removing freezes for an account so that its funds never /// go below a set minimum. pub trait Mutate: Inspect { /// Prevent actions which would reduce the balance of the account of `who` below the given @@ -66,16 +69,16 @@ pub trait Mutate: Inspect { /// counteract any pre-existing freezes in place for `who` under the `id`. Also unlike /// `set_freeze`, in the case that `amount` is zero, this is no-op and never fails. /// - /// Note that more funds can be locked than the total balance, if desired. + /// Note that more funds can be frozen than the total balance, if desired. fn extend_freeze(id: &Self::Id, who: &AccountId, amount: Self::Balance) -> DispatchResult; - /// Remove an existing lock. + /// Remove an existing freeze. fn thaw(id: &Self::Id, who: &AccountId) -> DispatchResult; /// Attempt to alter the amount frozen under the given `id` to `amount`. /// /// Fail if the account of `who` has fewer freezable funds than `amount`, unless `fortitude` is - /// `Fortitude::Force`. + /// [`Fortitude::Force`]. fn set_frozen( id: &Self::Id, who: &AccountId, @@ -91,7 +94,7 @@ pub trait Mutate: Inspect { /// the amount frozen under `id`. Do nothing otherwise. /// /// Fail if the account of `who` has fewer freezable funds than `amount`, unless `fortitude` is - /// `Fortitude::Force`. + /// [`Fortitude::Force`]. fn ensure_frozen( id: &Self::Id, who: &AccountId, diff --git a/substrate/frame/support/src/traits/tokens/fungible/hold.rs b/substrate/frame/support/src/traits/tokens/fungible/hold.rs index 6da652d2998dfc18dd7da5754da7faf22abab839..28ece25c91d427146354e3ad91257e01cd9c4cb5 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/hold.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/hold.rs @@ -16,6 +16,9 @@ // limitations under the License. //! The traits for putting holds within a single fungible token class. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits +//! including the place of the Holds in FRAME. use crate::{ ensure, @@ -214,8 +217,8 @@ pub trait Mutate: /// /// The actual amount released is returned with `Ok`. /// - /// If `precision` is `BestEffort`, then the amount actually unreserved and returned as the - /// inner value of `Ok` may be smaller than the `amount` passed. + /// If `precision` is [`Precision::BestEffort`], then the amount actually unreserved and + /// returned as the inner value of `Ok` may be smaller than the `amount` passed. /// /// NOTE! The inner of the `Ok` result variant returns the *actual* amount released. This is the /// opposite of the `ReservableCurrency::unreserve()` result, which gives the amount not able diff --git a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs index 0e25102197007e7ce3f38eef7417425f09dbcd2f..020dffe28c85b7a0f1bb213173342e4dd9e907d7 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -17,6 +17,8 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //!
unbalanced operations. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use super::{super::Imbalance as ImbalanceT, Balanced, *}; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs index 37749d396009d8a11629d3c1f94c6ce5efbe6f21..5374cc52bab72a25270606e89f97a6f035f3a704 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs @@ -16,6 +16,11 @@ // limitations under the License. //! Adapter to use `fungibles::*` implementations as `fungible::*`. +//! +//! This allows for a `fungibles` asset, e.g. from the `pallet_assets` pallet, to be used when a +//! `fungible` asset is expected. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use super::*; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungible/mod.rs b/substrate/frame/support/src/traits/tokens/fungible/mod.rs index ba4a2e5e21a2c9f5115ca6331e85df0fea58bb74..4a0cda2dbc7b7a8ae20dedae071cbf726a31a0fc 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/mod.rs @@ -17,26 +17,135 @@ //! The traits for dealing with a single fungible token class and any associated types. //! -//! ### User-implememted traits -//! - `Inspect`: Regular balance inspector functions. -//! - `Unbalanced`: Low-level balance mutating functions. Does not guarantee proper book-keeping and -//! so should not be called into directly from application code. Other traits depend on this and -//! provide default implementations based on it. -//! - `UnbalancedHold`: Low-level balance mutating functions for balances placed on hold. Does not +//! Also see the [`frame_tokens`] reference docs for more information about the place of +//! `fungible` traits in Substrate. +//! +//! # Available Traits +//! - [`Inspect`]: Regular balance inspector functions. +//! - [`Unbalanced`]: Low-level balance mutating functions. Does not guarantee proper book-keeping +//! and so should not be called into directly from application code. Other traits depend on this +//! and provide default implementations based on it. +//! - [`UnbalancedHold`]: Low-level balance mutating functions for balances placed on hold. Does not //! guarantee proper book-keeping and so should not be called into directly from application code. //! Other traits depend on this and provide default implementations based on it. -//! - `Mutate`: Regular balance mutator functions. Pre-implemented using `Unbalanced`, though the -//! `done_*` functions should likely be reimplemented in case you want to do something following -//! the operation such as emit events. -//! - `InspectHold`: Inspector functions for balances on hold. -//! - `MutateHold`: Mutator functions for balances on hold. Mostly pre-implemented using -//! `UnbalancedHold`. -//! - `InspectFreeze`: Inspector functions for frozen balance. -//! - `MutateFreeze`: Mutator functions for frozen balance. -//! - `Balanced`: One-sided mutator functions for regular balances, which return imbalance objects +//! - [`Mutate`]: Regular balance mutator functions. Pre-implemented using [`Unbalanced`], though +//! the `done_*` functions should likely be reimplemented in case you want to do something +//! following the operation such as emit events. +//! - [`InspectHold`]: Inspector functions for balances on hold.
+//! - [`MutateHold`]: Mutator functions for balances on hold. Mostly pre-implemented using +//! [`UnbalancedHold`]. +//! - [`InspectFreeze`]: Inspector functions for frozen balance. +//! - [`MutateFreeze`]: Mutator functions for frozen balance. +//! - [`Balanced`]: One-sided mutator functions for regular balances, which return imbalance objects //! which guarantee eventual book-keeping. May be useful for some sophisticated operations where //! funds must be removed from an account before it is known precisely what should be done with //! them. +//! +//! ## Terminology +//! +//! - **Total Issuance**: The total number of units in existence in a system. +//! +//! - **Total Balance**: The sum of an account's free and held balances. +//! +//! - **Free Balance**: A portion of an account's total balance that is not held. Note this is +//! distinct from the Spendable Balance, which represents how much Balance the user can actually +//! transfer. +//! +//! - **Held Balance**: Held balance still belongs to the account holder, but is suspended. It can +//! be slashed, but only after all the free balance has been slashed. +//! +//! Multiple holds stack rather than overlay. This means that if an account has +//! 3 holds for 100 units, the account can spend its funds for any reason down to 300 units, at +//! which point the holds will start to come into play. +//! +//! - **Frozen Balance**: A freeze on a specified amount of an account's free balance until a +//! specified block number. +//! +//! Multiple freezes always operate over the same funds, so they "overlay" rather than +//! "stack". This means that if an account has 3 freezes for 100 units, the account can spend its +//! funds for any reason down to 100 units, at which point the freezes will start to come into +//! play. +//! +//! - **Minimum Balance (a.k.a. Existential Deposit, a.k.a. ED)**: The minimum balance required to +//! create or keep an account open. This is to prevent "dust accounts" from filling storage. When +//! the free plus the held balance (i.e. the total balance) falls below this, then the account is +//! said to be dead. It loses its functionality as well as any prior history and all information +//! on it is removed from the chain's state. No account should ever have a total balance that is +//! strictly between 0 and the existential deposit (exclusive). If this ever happens, it indicates +//! either a bug in the implementation of this trait or an erroneous raw mutation of storage. +//! +//! - **Untouchable Balance**: The part of a user's free balance they cannot spend, due to ED or +//! Freeze(s). +//! +//! - **Spendable Balance**: The part of a user's free balance they can actually transfer, after +//! accounting for Holds and Freezes. +//! +//! - **Imbalance**: A condition when some funds were credited or debited without equal and opposite +//! accounting (i.e. a difference between total issuance and account balances). Functions that +//! result in an imbalance will return an object of the [`imbalance::Credit`] or +//! [`imbalance::Debt`] traits that can be managed within your runtime logic. +//! +//! If an imbalance is simply dropped, it should automatically maintain any book-keeping such as +//! total issuance. +//! +//! ## Visualising Balance Components Together 💫 +//! +//! ```ignore +//! |__total__________________________________| +//! |__on_hold__|_____________free____________| +//! |__________frozen___________| +//! |__on_hold__|__ed__| +//! |__untouchable__|__spendable__| +//! ``` +//! +//! 
## Holds and Freezes +//! +//! Both holds and freezes are used to prevent an account from using some of its balance. +//! +//! The primary distinctions between the two are: +//! - Holds are cumulative (do not overlap) and are distinct from the free balance +//! - Freezes are not cumulative, and can overlap with each other or with holds +//! +//! ```ignore +//! |__total_____________________________| +//! |__hold_a__|__hold_b__|_____free_____| +//! |__on_hold____________| // <- the sum of all holds +//! |__freeze_a_______________| +//! |__freeze_b____| +//! |__freeze_c________| +//! |__frozen_________________| // <- the max of all freezes +//! ``` +//! +//! Holds are designed to be infallibly slashed, meaning that any logic using a `Freeze` +//! must handle the possibility of the frozen amount being reduced, potentially to zero. A +//! permissionless function should be provided in order to allow bookkeeping to be updated in this +//! instance. E.g. some balance is frozen when it is used for voting; one could use held balance for +//! voting, but nothing prevents this frozen balance from being reduced if the overlapping hold is +//! slashed. +//! +//! Every Hold and Freeze is accompanied by a unique `Reason`, making it clear for each instance +//! what the originating pallet and purpose is. These reasons are amalgamated into a single enum +//! `RuntimeHoldReason` and `RuntimeFreezeReason` respectively, when the runtime is compiled. +//! +//! Note that `Hold` and `Freeze` reasons should remain in your runtime for as long as storage +//! could exist in your runtime with those reasons, otherwise your runtime state could become +//! undecodable. +//! +//! ### Should I use a Hold or Freeze? +//! +//! If you require a balance to be infallibly slashed, then you should use Holds. +//! +//! If you require setting a minimum account balance amount, then you should use Freezes. Note that +//! Freezes do not carry the same guarantees as Holds. Although the account cannot voluntarily +//! reduce its balance below the largest freeze, if Holds on the account are slashed then the +//! balance could drop below the freeze amount. +//! +//! ## Sets of Tokens +//! +//! For managing sets of tokens, see the [`fungibles`](`frame_support::traits::fungibles`) trait +//! which is a wrapper around this trait but supporting multiple asset instances. +//! +//! [`frame_tokens`]: ../../../../polkadot_sdk_docs/reference_docs/frame_tokens/index.html pub mod conformance_tests; pub mod freeze; diff --git a/substrate/frame/support/src/traits/tokens/fungible/regular.rs b/substrate/frame/support/src/traits/tokens/fungible/regular.rs index 0157b08bd137b2813236f34e193799cf8267b88a..4ed31dcf9fb1f099ebc576ae981d289613775b60 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/regular.rs @@ -16,6 +16,8 @@ // limitations under the License. //! `Inspect` and `Mutate` traits for working with regular balances. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use crate::{ ensure, diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs index 33711d7a16cc6e7fe40eb619f5b4aba4703163df..575b771a614480147d5afe878dc71652d278bb67 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs @@ -17,6 +17,8 @@ //!
Types to combine some `fungible::*` and `fungibles::*` implementations into one union //! `fungibles::*` implementation. +//! +//! See the [`crate::traits::fungible`] doc for more information about fungible traits. use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs b/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs index 7a80279b01981420840811e550c92c106a682bef..09e3d20756a2fbb3e1dd8c08b17b38c44f82c28c 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/approvals.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Inspect and Mutate traits for Asset approvals +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::dispatch::DispatchResult; pub trait Inspect: super::Inspect { diff --git a/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs b/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs index 08bb784a7dbf63fd39c51f0468cfe42cbcb16248..81dbb93b0b8d63fafcbd80ab64ad3aec53c40bab 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/enumerable.rs @@ -15,6 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! Contains an interface for enumerating assets in existence or owned by a given account. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. + /// Interface for enumerating assets in existence or owned by a given account. pub trait Inspect: super::Inspect { type AssetsIterator; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs b/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs index b07d20d6c41ce48c3f4face0ea4a213a3023f5e3..244f700589945d97d1d5ed2abc8326bded127d73 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/freeze.rs @@ -16,6 +16,8 @@ // limitations under the License. //! The traits for putting freezes within a single fungible token class. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::{ensure, traits::tokens::Fortitude}; use scale_info::TypeInfo; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/hold.rs b/substrate/frame/support/src/traits/tokens/fungibles/hold.rs index 1efd1594213c1a80eeccdd4fdf0c9f01c812794c..ef3fef7a300d96b5c741983fd0b19297b44f541c 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/hold.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/hold.rs @@ -16,6 +16,8 @@ // limitations under the License. //! The traits for putting holds within a single fungible token class. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::{ ensure, diff --git a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs index 54c1e900b6e3664b6fbf3470e41d17eaae37e635..bb0d83721a481e11f80f1ae9fa75292528f7c8a0 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -17,6 +17,8 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! 
unbalanced operations. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use super::*; use crate::traits::{ diff --git a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs index 0e195a52318b7ddb34aef57e3d8742cd36952e06..49f6c846ccdd54adeceb668bb10a8cb2ca11281f 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Traits for creating and destroying assets. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use sp_runtime::{DispatchError, DispatchResult}; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs b/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs index ab310119e58467da21dc9f68c84ad8dd173e8f43..ab722426dadf6fa759340ac12609e58c1139ff02 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Inspect and Mutate traits for Asset metadata +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use crate::dispatch::DispatchResult; use sp_std::vec::Vec; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs index 1db0706ba4fde4c3b53dbcdd5ea850a37752b1e9..2122fdeaed3f28b937fa16a2230fa0f9a56d869a 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs @@ -15,7 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The traits for sets of fungible tokens and any associated types. +//! The traits for *sets* of [`fungible`](`frame_support::traits::fungible`) tokens and any +//! associated types. +//! +//! Individual tokens in the `fungibles` set may be used when a `fungible` trait is expected using +//! [`crate::traits::tokens::fungible::ItemOf`]. +//! +//! Also see the [`frame_tokens`] reference docs for more information about the place of +//! `fungible` traits in Substrate. +//! +//! [`frame_tokens`]: ../../../../polkadot_sdk_docs/reference_docs/frame_tokens/index.html pub mod approvals; mod enumerable; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs index 8cc97802da66e5fa192a3720cfc6381c8930818f..b30e0ae3a2a3657c1a2685fa413ff120803ae187 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/regular.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/regular.rs @@ -16,6 +16,8 @@ // limitations under the License. //! `Inspect` and `Mutate` traits for working with regular balances. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use sp_std::marker::PhantomData; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs index 5cd1228afbce72542aae333759968d5ddee6fac3..4f95ad8368c2c555e1a07f1f1b8b9ede7cf574cb 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs @@ -16,6 +16,8 @@ // limitations under the License. //! 
Inspect traits for Asset roles +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. pub trait Inspect<AccountId>: super::Inspect<AccountId> { // Get owner for an AssetId. diff --git a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs index 9d2a783df2a4af0c593c784d4c4d794b0311c392..c8a1ec37e78184f36342acefd65dd1815792b257 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs @@ -16,6 +16,8 @@ // limitations under the License. //! Type to combine two `fungibles::*` implementations into one union `fungibles::*` implementation. +//! +//! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. use frame_support::traits::{ tokens::{ diff --git a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs index c1afac35fc93c4ab9263a190c0adca44adad3ded..59a582389ba61f3eb65fbc7f3710b1edba0a56ab 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Means for splitting an imbalance into two and hanlding them differently. +//! Means for splitting an imbalance into two and handling them differently. use super::super::imbalance::{Imbalance, OnUnbalanced}; use sp_runtime::traits::Saturating; diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index fd497bc4eda6f0b66127b4fc87edacd9be76d56f..a4dd5e4914283e6da028aac038cecee501a2228f 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -130,7 +130,7 @@ impl<Balance: Zero> WithdrawConsequence<Balance> { pub enum DepositConsequence { /// Deposit couldn't happen due to the amount being too low. This is usually because the /// account doesn't yet exist and the deposit wouldn't bring it to at least the minimum needed - /// for existance. + /// for existence. BelowMinimum, /// Deposit cannot happen since the account cannot be created (usually because it's a consumer /// and there exists no provider reference).
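Reviewer note on the `ItemOf` pointer added to the `fungibles` module docs above: a minimal sketch, assuming a `pallet-assets`-style provider named `Assets` and a runtime `AccountId` type (both placeholders, not part of this diff), of how one asset in a `fungibles` set is adapted to the `fungible` interface:

```rust
use frame_support::{parameter_types, traits::tokens::fungible::ItemOf};

parameter_types! {
	// Illustrative asset id; any id understood by the `fungibles` provider works.
	pub const TargetAssetId: u32 = 1337;
}

// `ItemOf<F, A, AccountId>` narrows the `fungibles` implementation `F` down to
// the single asset named by the `Get` impl `A`, exposing it as a `fungible`.
pub type AssetAsFungible = ItemOf<Assets, TargetAssetId, AccountId>;
```

The resulting alias can then be plugged in wherever a pallet `Config` expects a `fungible::*` implementation, e.g. a `Currency`-style associated type.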
diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 2f12cc00ed9e08560870e6f62b1d1930bad90832..88124e0a43b9cc10438e01e44d98e9f9130ca56c 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] static_assertions = "1.1.0" serde = { features = ["derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } sp-api = { path = "../../../primitives/api", default-features = false } sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 0617aa105a21f1f3a583404465da97f0af79b176..3f52b4664b187675afd7bd13c4c96db62523a73b 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } renamed-frame-support = { package = "frame-support", path = "../..", default-features = false } renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false } sp-core = { path = "../../../../primitives/core", default-features = false } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index ca889faef876b197cb15a338e0a65b30536b79ec..7a20c3f2730629e709eb78c650b0ca4ca0d3e964 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true } frame-support = { path = "../..", default-features = false } frame-system = { path = "../../../system", default-features = false } diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 632ea4e794f6f8edb46e57d190be108219935ea0..295b2a1a524721e54b0e04f762f235f5e8a8b742 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } frame = { path = "../../..", default-features = false, features = ["experimental", "runtime"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = 
["derive"] } [features] default = ["std"] diff --git a/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs b/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs index 332e1e78730e526491b8d448858ab3ffbafab2d9..3f3148a03a9fa2b65edc511f3262474babf5bd3f 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/attached_to_non_impl.rs @@ -50,7 +50,7 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as Animal)] struct Something {} diff --git a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs index bc118acdf1e69745a8b890ea27fbd3f4e7375704..7093c32400bd5455c31055eeb700b3a9bfeebffc 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/bad_default_impl_path.rs @@ -50,14 +50,14 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} // Should throw: `error: cannot find macro `__export_tokens_tt_tiger` in this scope` // // Note that there is really no better way to clean up this error, tt_call suffers from the // same downside but this is really the only rough edge when using macro magic. #[derive_impl(Tiger as Animal)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs index 9535ac3deda6b46f85178b7c9062d8cea7adfe94..8e0253e45c17ce86b090540ecae5f22efef54481 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/bad_disambiguation_path.rs @@ -50,10 +50,10 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as Insect)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs b/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs index f26c28313e5154a3f0a4e66d458375d0ecf59761..4914ceea0b6d0dda7e90d2240f66c122d7fc35b1 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/missing_disambiguation_path.rs @@ -50,10 +50,10 @@ impl Animal for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} #[derive_impl(FourLeggedAnimal as)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } diff --git a/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs b/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs index 37c0742f195d786ded121a631dbd384e48aff137..20744b8cba2676a52c7476155381d523ab4355fe 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs +++ b/substrate/frame/support/test/tests/derive_impl_ui/pass/basic_overriding.rs @@ -51,19 +51,19 @@ impl Animal 
for FourLeggedAnimal { } } -pub struct AcquaticMammal {} +pub struct AquaticMammal {} // without omitting the `as X` #[derive_impl(FourLeggedAnimal as Animal)] -impl Animal for AcquaticMammal { +impl Animal for AquaticMammal { type Locomotion = (Swims, RunsOnFourLegs); type Environment = (Land, Sea); } -assert_type_eq_all!(<AcquaticMammal as Animal>::Locomotion, (Swims, RunsOnFourLegs)); -assert_type_eq_all!(<AcquaticMammal as Animal>::Environment, (Land, Sea)); -assert_type_eq_all!(<AcquaticMammal as Animal>::Diet, Omnivore); -assert_type_eq_all!(<AcquaticMammal as Animal>::SleepingStrategy, Diurnal); +assert_type_eq_all!(<AquaticMammal as Animal>::Locomotion, (Swims, RunsOnFourLegs)); +assert_type_eq_all!(<AquaticMammal as Animal>::Environment, (Land, Sea)); +assert_type_eq_all!(<AquaticMammal as Animal>::Diet, Omnivore); +assert_type_eq_all!(<AquaticMammal as Animal>::SleepingStrategy, Diurnal); pub struct Lion {} diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs index 8e8079b183c85991a046a3572ee54ac2fd64943c..6c71b544426512c54a4320452204b0a4b62e7764 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -82,7 +82,7 @@ fn module_error_outer_enum_expand_explicit() { // Check that all error types are propagated match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { - // Error passed implicitely to the pallet system. + // Error passed implicitly to the pallet system. RuntimeError::System(system) => match system { frame_system::Error::InvalidSpecName => (), frame_system::Error::SpecVersionNeedsToIncrease => (), diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 08b0b919e2189aca52c8f12387b72e98128ff0e3..79828119742c106763b97096ffaed85acf7381da 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -62,11 +62,11 @@ frame_support::construct_runtime!( // Exclude part `Storage` in order not to check its metadata in tests. System: frame_system exclude_parts { Storage }, - // Pallet exposes `Error` implicitely. + // Pallet exposes `Error` implicitly. Example: common::outer_enums::pallet, Instance1Example: common::outer_enums::pallet::<Instance1>, - // Pallet exposes `Error` implicitely. + // Pallet exposes `Error` implicitly. Example2: common::outer_enums::pallet2, Instance1Example2: common::outer_enums::pallet2::<Instance1>, @@ -82,7 +82,7 @@ fn module_error_outer_enum_expand_implicit() { // Check that all error types are propagated match RuntimeError::Example(pallet::Error::InsufficientProposersBalance) { - // Error passed implicitely to the pallet system. + // Error passed implicitly to the pallet system. RuntimeError::System(system) => match system { frame_system::Error::InvalidSpecName => (), frame_system::Error::SpecVersionNeedsToIncrease => (), diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs b/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs index f40d1040858f5ba47c2ffb5817a02a1035b3a87b..ddccd0b3e192c4d89bc841bfbb2bf2412d82631d 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/inherited_call_weight3.rs @@ -18,7 +18,7 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; -// If, for whatever reason, you dont to not use a `WeightInfo` trait - it will still work.
+// If, for whatever reason, you don't to not use a `WeightInfo` trait - it will still work. struct Impl; impl Impl { diff --git a/substrate/frame/support/test/tests/versioned_migration.rs b/substrate/frame/support/test/tests/versioned_migration.rs index 3fdfb902129ef8b16d82f7d6e4a16000ce0c61e3..e7d146940cb92f31020e8b27fe014983f50396f0 100644 --- a/substrate/frame/support/test/tests/versioned_migration.rs +++ b/substrate/frame/support/test/tests/versioned_migration.rs @@ -23,7 +23,7 @@ use frame_support::{ construct_runtime, derive_impl, migrations::VersionedMigration, parameter_types, - traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion}, + traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion, UncheckedOnRuntimeUpgrade}, weights::constants::RocksDbWeight, }; use frame_system::Config; @@ -103,9 +103,11 @@ parameter_types! { static PostUpgradeCalledWith: Vec<u8> = Vec::new(); } -/// Implement `OnRuntimeUpgrade` for `SomeUnversionedMigration`. +/// Implement `UncheckedOnRuntimeUpgrade` for `SomeUnversionedMigration`. /// It sets SomeStorage to S, and returns a weight derived from UpgradeReads and UpgradeWrites. -impl OnRuntimeUpgrade for SomeUnversionedMigration { +impl UncheckedOnRuntimeUpgrade + for SomeUnversionedMigration +{ fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> { PreUpgradeCalled::set(true); Ok(PreUpgradeReturnBytes::get().to_vec()) diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 416969e9c4776aa161dde65281b7847156bdbf56..16b3b946e22125e6d844020a4a94b788dc4aa270 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } serde = { features = ["alloc", "derive"], workspace = true } frame-support = { path = "../support", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } @@ -28,7 +28,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat sp-std = { path = "../../primitives/std", default-features = false } sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index 80fdff756c02b25f8ddbf24bd75330458159d199..473a6bb132d741cf9485868201f114234be9f5d8 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-support = { path = "../../support", default-features = false } frame-system = { path = "..", default-features =
false } diff --git a/substrate/frame/system/src/limits.rs b/substrate/frame/system/src/limits.rs index 5fd7a5af87571ff3a77442c1588a6f1fa5bee686..ab5a98a6b9745c044bb5d22847fb6db16cd4d573 100644 --- a/substrate/frame/system/src/limits.rs +++ b/substrate/frame/system/src/limits.rs @@ -378,7 +378,7 @@ impl BlockWeightsBuilder { /// class, based on the allowance. /// /// This is to make sure that extrinsics don't stay forever in the pool, - /// because they could seamingly fit the block (since they are below `max_block`), + /// because they could seemingly fit the block (since they are below `max_block`), /// but the cost of calling `on_initialize` always prevents them from being included. pub fn avg_block_initialization(mut self, init_cost: Perbill) -> Self { self.init_cost = Some(init_cost); diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index a019cfd666e8cb3ddccc02aa706692cbe6cee2e3..a64b326196403cb87e0970c061ac09e42136d4e1 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -22,7 +22,7 @@ //! This module provides transaction related helpers to: //! - Submit a raw unsigned transaction //! - Submit an unsigned transaction with a signed payload -//! - Submit a signed transction. +//! - Submit a signed transaction. //! //! ## Usage //! @@ -384,7 +384,7 @@ where /// /// // runtime-specific public key /// type Public = MultiSigner: From<sr25519::Public>; -/// type Signature = MulitSignature: From<sr25519::Signature>; +/// type Signature = MultiSignature: From<sr25519::Signature>; ``` // TODO [#5662] Potentially use `IsWrappedBy` types, or find some other way to make it easy to // obtain unwrapped crypto (and wrap it back). @@ -444,7 +444,7 @@ pub trait SigningTypes: crate::Config { /// A public key that is capable of identifying `AccountId`s. /// /// Usually that's either a raw crypto public key (e.g. `sr25519::Public`) or - /// an aggregate type for multiple crypto public keys, like `MulitSigner`. + /// an aggregate type for multiple crypto public keys, like `MultiSigner`.
type Public: Clone + PartialEq + IdentifyAccount diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 28e57fcab0a79d18ea8d8a1b2b8be90cb05198db..da49b29c89b78f26b3f5eee35e81b7d2dacf8bb8 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } @@ -30,7 +30,7 @@ sp-std = { path = "../../primitives/std", default-features = false } sp-storage = { path = "../../primitives/storage", default-features = false } sp-timestamp = { path = "../../primitives/timestamp", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] sp-core = { path = "../../primitives/core" } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index 7339cf0a8cce86ec15aebe6a55115f10360b1a7f..a2acf0638ffb0717ece5c132009b983c86ef2072 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/tips/src/migrations/mod.rs b/substrate/frame/tips/src/migrations/mod.rs index 9cdd01c17fbf6009e306caff31079ca0eb7a6ea1..a7917bfce16fb0057c184b2b736ee5788f3a2fe5 100644 --- a/substrate/frame/tips/src/migrations/mod.rs +++ b/substrate/frame/tips/src/migrations/mod.rs @@ -17,7 +17,7 @@ /// Version 4. /// -/// For backward compatability reasons, pallet-tips uses `Treasury` for storage module prefix +/// For backward compatibility reasons, pallet-tips uses `Treasury` for storage module prefix /// before calling this migration. After calling this migration, it will get replaced with /// own storage identifier. 
pub mod v4; diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 275e1c01f927217726907f3e6045fc0b0d3681e5..24e5a714f0fe3aee0a5c2d74d3d837e8b969cdb1 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 7640cc815e56c7fcecbb7d09c61c81635ca22491..fef9afdee05f37cbd2999b1bbfecb07f2601d3bd 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -24,7 +24,7 @@ frame-system = { path = "../../system", default-features = false } pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } pallet-transaction-payment = { path = "..", default-features = false } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [dev-dependencies] sp-core = { path = "../../../primitives/core", default-features = false } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 1da3237df0817c989899b9180af984da9766aa81..fc4f1aecc15ee910ef90ffd18b39173d5de096e9 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -29,7 +29,7 @@ frame-benchmarking = { path = "../../benchmarking", default-features = false, op # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } [dev-dependencies] diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index d3a1721ccb9909b2f2a705d9bb9ddc9ebc7cb5ee..bc0efd2d64a3b27f5e280a0318f1e5822d583e47 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -384,7 +384,7 @@ fn query_call_info_and_fee_details_works() { adjusted_weight_fee: info .weight .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multiplier */ }), tip: 0, }, diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index 
1386d9b5a5691475bdc06a198a86cb944b7e8305..31741cf32d83edfeaa10d06060c79df07f2b50c9 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/transaction-storage/README.md b/substrate/frame/transaction-storage/README.md index 1066968469d4b7c629ac7243b9e7e20febdbdbab..b173c0a84d5a0b387a14abe0415733c603a7010a 100644 --- a/substrate/frame/transaction-storage/README.md +++ b/substrate/frame/transaction-storage/README.md @@ -79,7 +79,7 @@ ipfs block get /ipfs/ > kitten.jpeg ``` To renew data and prevent it from being disposed after the storage period, use `transactionStorage.renew(block, index)` -where `block` is the block number of the previous store or renew transction, and index is the index of that transaction +where `block` is the block number of the previous store or renew transaction, and index is the index of that transaction in the block. diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs index d32cfe169ce0b3f3ef76040ea596673e7d02d2c7..398cb350c501ee18dd25073b4047b5045b69fa49 100644 --- a/substrate/frame/transaction-storage/src/lib.rs +++ b/substrate/frame/transaction-storage/src/lib.rs @@ -137,7 +137,7 @@ pub mod pallet { InvalidProof, /// Missing storage proof. MissingProof, - /// Unable to verify proof becasue state data is missing. + /// Unable to verify proof because state data is missing. MissingStateData, /// Double proof check in the block. DoubleCheck, diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs index e17b3ca3bebd596f1d7071c45c986820169c4f94..621f74804eccae4b06f2d1f96e238334d038e68d 100644 --- a/substrate/frame/transaction-storage/src/tests.rs +++ b/substrate/frame/transaction-storage/src/tests.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Tests for transction-storage pallet. +//! Tests for transaction-storage pallet. 
use super::{Pallet as TransactionStorage, *}; use crate::mock::*; @@ -53,8 +53,8 @@ fn discards_data() { }; run_to_block(11, proof_provider); assert!(Transactions::<Test>::get(1).is_some()); - let transctions = Transactions::<Test>::get(1).unwrap(); - assert_eq!(transctions.len(), 2); + let transactions = Transactions::<Test>::get(1).unwrap(); + assert_eq!(transactions.len(), 2); assert_eq!(ChunkCount::<Test>::get(1), 16); run_to_block(12, proof_provider); assert!(Transactions::<Test>::get(1).is_none()); diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 5f90904123d9341425ff707ae7682e2f20f28ecf..34037338a52bac4d18130e6a3c88b3c3a5d3dc60 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -20,9 +20,9 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", "max-encoded-len", ] } -docify = "0.2.7" +docify = "0.2.8" impl-trait-for-tuples = "0.2.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index 5e429d3914bd0c60ea24eb53825c4da4b6a07fe5..d569ae406ea39aa9734d145968924ba1309e2f60 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -621,7 +621,7 @@ pub mod pallet { with_context::<SpendContext<BalanceOf<T, I>>, _>(|v| { let context = v.or_default(); - // We group based on `max_amount`, to dinstinguish between different kind of + // We group based on `max_amount`, to distinguish between different kind of // origins. (assumes that all origins have different `max_amount`) // // Worst case is that we reject some "valid" request. @@ -1042,7 +1042,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { /// ### Invariants of proposal storage items /// /// 1. [`ProposalCount`] >= Number of elements in [`Proposals`]. - /// 2. Each entry in [`Proposals`] should be saved under a key stricly less than current + /// 2. Each entry in [`Proposals`] should be saved under a key strictly less than current /// [`ProposalCount`]. /// 3. Each [`ProposalIndex`] contained in [`Approvals`] should exist in [`Proposals`]. /// Note, that this automatically implies [`Approvals`].count() <= [`Proposals`].count(). @@ -1078,7 +1078,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { /// ## Invariants of spend storage items /// /// 1. [`SpendCount`] >= Number of elements in [`Spends`]. - /// 2. Each entry in [`Spends`] should be saved under a key stricly less than current + /// 2. Each entry in [`Spends`] should be saved under a key strictly less than current /// [`SpendCount`]. /// 3. For each spend entry contained in [`Spends`] we should have spend.expire_at /// > spend.valid_from.
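The treasury hunks above formalize storage invariants, e.g. every `Proposals` key must be strictly less than `ProposalCount`. A minimal sketch of how such a check can be expressed, reusing the storage item names from the diff; the helper itself is hypothetical, not part of this change:

```rust
// Hypothetical `try_state`-style helper mirroring invariant 2 above.
fn check_proposal_keys<T: Config<I>, I: 'static>() -> Result<(), sp_runtime::TryRuntimeError> {
	let count = ProposalCount::<T, I>::get();
	frame_support::ensure!(
		Proposals::<T, I>::iter_keys().all(|index| index < count),
		"a `Proposals` key must be strictly less than `ProposalCount`"
	);
	Ok(())
}
```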
diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index ace2172454eb976946c4069c7ac69f781ca47f2d..5f028179037d1214aadb460fa464b79f55c05a23 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } -docify = "0.2.7" +docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } pallet-balances = { path = "../balances", default-features = false, optional = true } diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index 4e5f21b3d8df8fa45da0735cdd0ebe1afac6d181..ee6af191d33f31142a109ad7b11b6d2ff1104759 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/uniques/src/migration.rs b/substrate/frame/uniques/src/migration.rs index ba0855a6bb657698d5c642181c3483459774a04f..90d44e7790d6d133bde9475851557cd625e559cb 100644 --- a/substrate/frame/uniques/src/migration.rs +++ b/substrate/frame/uniques/src/migration.rs @@ -18,15 +18,15 @@ //! Various pieces of common functionality. use super::*; use core::marker::PhantomData; -use frame_support::traits::{Get, OnRuntimeUpgrade}; +use frame_support::traits::{Get, UncheckedOnRuntimeUpgrade}; mod v1 { use super::*; /// Actual implementation of the storage migration. 
- pub struct MigrateToV1Impl<T, I>(PhantomData<(T, I)>); + pub struct UncheckedMigrateToV1Impl<T, I>(PhantomData<(T, I)>); - impl<T: Config<I>, I: 'static> OnRuntimeUpgrade for MigrateToV1Impl<T, I> { + impl<T: Config<I>, I: 'static> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1Impl<T, I> { fn on_runtime_upgrade() -> frame_support::weights::Weight { let mut count = 0; for (collection, detail) in Collection::<T, I>::iter() { @@ -49,7 +49,7 @@ mod v1 { pub type MigrateV0ToV1<T, I> = frame_support::migrations::VersionedMigration< 0, 1, - v1::MigrateToV1Impl<T, I>, + v1::UncheckedMigrateToV1Impl<T, I>, Pallet<T, I>, <T as frame_system::Config>::DbWeight, >; diff --git a/substrate/frame/uniques/src/tests.rs b/substrate/frame/uniques/src/tests.rs index 351dac09f7f202a62df508fe71d26bb24662d5bc..afd0352bf90eb27164f3a714393e33d1274da1f9 100644 --- a/substrate/frame/uniques/src/tests.rs +++ b/substrate/frame/uniques/src/tests.rs @@ -289,7 +289,7 @@ fn transfer_owner_should_work() { assert_eq!(Balances::reserved_balance(&2), 0); assert_eq!(Balances::reserved_balance(&3), 45); - // 2's acceptence from before is reset when it became owner, so it cannot be transfered + // 2's acceptance from before is reset when it became owner, so it cannot be transferred // without a fresh acceptance. assert_noop!( Uniques::transfer_ownership(RuntimeOrigin::signed(3), 0, 2), @@ -692,7 +692,7 @@ fn approved_account_gets_reset_after_transfer() { assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 5)); - // this shouldn't work because we have just transfered the item to another account. + // this shouldn't work because we have just transferred the item to another account. assert_noop!( Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 4), Error::<Test>::NoPermission diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index ce5cd0fa61f2166b1993eb2150953c13fa1cc5e3..2ad575ed51ffe708b9369a81ea0a4aa5d50e600b 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/utility/README.md b/substrate/frame/utility/README.md index 00fff76cd626bcb119f971edb910c033024b54c8..0a6769ae1c7c14a8480cd58a43c6a2e495a10906 100644 --- a/substrate/frame/utility/README.md +++ b/substrate/frame/utility/README.md @@ -17,7 +17,7 @@ This module contains two basic pieces of functionality: need multiple distinct accounts (e.g. as controllers for many staking accounts), but where it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative accounts are, for the purposes of proxy filtering considered exactly the same as - the oigin and are thus hampered with the origin's filters. + the origin and are thus hampered with the origin's filters. Since proxy filters are respected in all dispatches of this module, it should never need to be filtered by any proxy.
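The uniques migration above follows the same shape as the `versioned_migration.rs` test change earlier in this diff: the inner type implements `UncheckedOnRuntimeUpgrade` and carries no version checks of its own, while `VersionedMigration` supplies the storage-version gating. A condensed sketch of the pattern, with the pallet's `Config` and `Pallet` types assumed in scope and the actual storage translation elided:

```rust
use core::marker::PhantomData;
use frame_support::{
	migrations::VersionedMigration, traits::UncheckedOnRuntimeUpgrade, weights::Weight,
};

/// Inner migration: pure storage translation.
pub struct UncheckedMigrateToV1Impl<T, I>(PhantomData<(T, I)>);

impl<T: Config<I>, I: 'static> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1Impl<T, I> {
	fn on_runtime_upgrade() -> Weight {
		// ... translate storage from the v0 to the v1 layout here ...
		Weight::zero()
	}
}

/// Outer alias: runs the inner migration only when the on-chain storage
/// version is 0, then bumps it to 1.
pub type MigrateV0ToV1<T, I> = VersionedMigration<
	0,
	1,
	UncheckedMigrateToV1Impl<T, I>,
	Pallet<T, I>,
	<T as frame_system::Config>::DbWeight,
>;
```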
diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index 96938b95a2ad4c09f3ec821ecc14c5cc3bc68270..e71731e397788a956bd72a74e93b9ac10d0fd7a6 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/vesting/src/migrations.rs b/substrate/frame/vesting/src/migrations.rs index cac3c90b403ab4b1e037e140c0598aa83d69816e..6fe82312b637d550bfe164c62155bb9261e75801 100644 --- a/substrate/frame/vesting/src/migrations.rs +++ b/substrate/frame/vesting/src/migrations.rs @@ -29,7 +29,7 @@ pub mod v1 { log::debug!( target: "runtime::vesting", - "migration: Vesting storage version v1 PRE migration checks succesful!" + "migration: Vesting storage version v1 PRE migration checks successful!" ); Ok(()) diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index 1a867f8075d12a35f82e8fe37f2cf25230b710a3..5c28fe29142c786621eee8fc4355983e990a8981 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index f4b1d13c520399a9d5960bf90958fdd5b154d10a..544ba72141ebd22291c04a54f34adbd873b335aa 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -28,7 +28,7 @@ sp-state-machine = { path = "../state-machine", default-features = false, option sp-trie = { path = "../trie", default-features = false, optional = true } hash-db = { version = "0.16.0", optional = true } thiserror = { optional = true, workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true } diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index b7e5600a017a9978fb20286ad5724fc21936ca59..87a381fd7bf92bc72e30babf018b5dcb0141e571 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -797,7 +797,7 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { } Ok(quote!( - const RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); + pub const 
RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); #( #sections )* )) diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 3a90553bbf000c1b1ea70ae11e7532a456f1c2f2..52a4bd7bda30b30978e15fa1277621fcdff14f59 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -26,11 +26,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } sp-state-machine = { path = "../../state-machine" } trybuild = "1.0.88" rustversion = "1.0.6" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [dev-dependencies] criterion = "0.4.0" -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } sp-core = { path = "../../core" } static_assertions = "1.1.0" diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 6f90a2b6262e50d7edd23e415924a8259f0a0d96..20e2be4d1552f4874dad7e0902ccd9c00a660976 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { path = "../core", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-std = { path = "../std", default-features = false } sp-io = { path = "../io", default-features = false } diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index ea2e5a83127bb50a8060e22d81660c80e9e18ee2..2355f1ba527d5227a804fc30ae2deccc44df2152 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -26,7 +26,7 @@ pub use sp_core::crypto::{DeriveError, Pair, SecretStringError}; #[doc(hidden)] pub use sp_core::{ self, - crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, UncheckedFrom, Wraps}, + crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, Signature, UncheckedFrom, Wraps}, RuntimeDebug, }; @@ -505,6 +505,12 @@ macro_rules! app_crypto_signature_common { } } + impl AsMut<[u8]> for Signature { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } + } + impl $crate::AppSignature for Signature { type Generic = $sig; } @@ -525,6 +531,12 @@ macro_rules! app_crypto_signature_common { } } + impl $crate::Signature for Signature {} + + impl $crate::ByteArray for Signature { + const LEN: usize = <$sig>::LEN; + } + impl Signature { /// Convert into wrapped generic signature type. 
pub fn into_inner(self) -> $sig { diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 120edd06a66099c18efc4334f28f7031cb6a1bed..16eae43c73fa60da63dd249f7934c70e805dd8f2 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -23,11 +23,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.17", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } static_assertions = "1.1.0" sp-std = { path = "../std", default-features = false } -docify = "0.2.7" +docify = "0.2.8" [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs b/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs index e76dd1503e39fddb5dbfde601d5a86d28c5b1b88..e2d31065635ebc4d98edb5c6e5df39893ae65b9d 100644 --- a/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs +++ b/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs @@ -66,7 +66,7 @@ fn main() { let c = FixedI64::saturating_from_integer(x.saturating_add(y)); assert_eq!(a.saturating_add(b), c); - // Check substraction. + // Check subtraction. let a = FixedI64::saturating_from_integer(x); let b = FixedI64::saturating_from_integer(y); let c = FixedI64::saturating_from_integer(x.saturating_sub(y)); diff --git a/substrate/primitives/arithmetic/src/fixed_point.rs b/substrate/primitives/arithmetic/src/fixed_point.rs index 736a900bde242163b52a00c64183912974ce3c53..c4e9259c5fc94e3494a4df2bacc36fcdb562e006 100644 --- a/substrate/primitives/arithmetic/src/fixed_point.rs +++ b/substrate/primitives/arithmetic/src/fixed_point.rs @@ -568,7 +568,7 @@ macro_rules! implement_fixed { let v = self.0 as u128; // Want x' = sqrt(x) where x = n/D and x' = n'/D (D is fixed) - // Our prefered way is: + // Our preferred way is: // sqrt(n/D) = sqrt(nD / D^2) = sqrt(nD)/sqrt(D^2) = sqrt(nD)/D // ergo n' = sqrt(nD) // but this requires nD to fit into our type. 
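A quick numeric sanity check of the square-root identity used in the `fixed_point.rs` comment above, sqrt(n/D) = sqrt(nD)/D, taking D = 10^9 (the `FixedI64` divisor); plain `f64` is used here purely to verify the algebra:

```rust
fn main() {
	let d: f64 = 1_000_000_000.0; // FixedI64-style divisor D = 10^9
	let n: f64 = 2.25 * d; // inner representation of the value 2.25

	let direct = (n / d).sqrt(); // sqrt(x) with x = n/D
	let rearranged = (n * d).sqrt() / d; // sqrt(nD)/D

	assert!((direct - rearranged).abs() < 1e-12);
	println!("sqrt(2.25) = {direct}"); // prints 1.5
}
```

As the comment notes, the integer implementation computes sqrt(nD) directly, which is why nD must fit into the working integer type.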
diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index 8ee8bb94ed97bf4283f2853cc28a8525a6448da6..88d93f40059625dd271b734c23fd635113dcbac9 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 9d13d627eebc36dcc5bdce3efd073a7dc29012a0..e716b61bfeb1668a42434aed147fa0810a1b7dca 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" schnellru = "0.2.1" diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 8208f9128e714511c2a3604d7d754e4d87d62530..7a09865f858d3faa4ec1ee1fd5a2beb84e9267d7 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -187,7 +187,7 @@ pub trait Backend<Block: BlockT>: /// a block with the given `base_hash`. /// /// The search space is always limited to blocks which are in the finalized - /// chain or descendents of it. + /// chain or descendants of it. /// /// Returns `Ok(None)` if `base_hash` is not found in search space. // TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) diff --git a/substrate/primitives/blockchain/src/error.rs b/substrate/primitives/blockchain/src/error.rs index 74a2ed3fba50d1b349370a831a92af5d8e78950f..e8ac148d7511ec9cbbf6564015fd63e12d7ec06c 100644 --- a/substrate/primitives/blockchain/src/error.rs +++ b/substrate/primitives/blockchain/src/error.rs @@ -34,7 +34,7 @@ pub enum ApplyExtrinsicFailed { /// The transaction cannot be included into the current block. /// /// This doesn't necessary mean that the transaction itself is invalid, but it might be just - /// unappliable onto the current block. + /// unapplicable onto the current block. #[error("Extrinsic is not valid: {0:?}")] Validity(#[from] TransactionValidityError), diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index 08b3c9ab3dfbda6acc7a0ad6b575c518dc97ecc3..ccd640c0567afc6c9f9341fca754471a15a2829c 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -178,7 +178,7 @@ pub struct TreeRoute<Block: BlockT> { impl<Block: BlockT> TreeRoute<Block> { /// Creates a new `TreeRoute`. /// - /// To preserve the structure safety invariats it is required that `pivot < route.len()`. + /// To preserve the structure safety invariants it is required that `pivot < route.len()`.
pub fn new(route: Vec<HashAndNumber<Block>>, pivot: usize) -> Result<Self, Error> { if pivot < route.len() { Ok(TreeRoute { route, pivot }) @@ -212,7 +212,7 @@ impl<Block: BlockT> TreeRoute<Block> { ) } - /// Get a slice of enacted blocks (descendents of the common ancestor) + /// Get a slice of enacted blocks (descendants of the common ancestor) pub fn enacted(&self) -> &[HashAndNumber<Block>] { &self.route[self.pivot + 1..] } diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index 0cedc59ea8fb1d06c7fc791561d1b3674a378300..b689c84f158c7bddc1e845449654056b67e78b6f 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-consensus-slots = { path = "../slots", default-features = false } diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 724b9fd3e28925c3453cc86e2c1b7bea061676d0..2420f48b1f4aa97923a556de37a06a6611ba8e05 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index 6eb75b270a02815e1d37e50ff8f9da9d1be5f8c6..ee07da6829f52933424a838e36a2c1246bf9aeb7 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -266,7 +266,7 @@ impl Default for BabeEpochConfiguration { } /// Verifies the equivocation proof by making sure that: both headers have -/// different hashes, are targetting the same slot, and have valid signatures by +/// different hashes, are targeting the same slot, and have valid signatures by /// the same authority. pub fn check_equivocation_proof(proof: EquivocationProof) -> bool where @@ -298,7 +298,7 @@ where let first_pre_digest = find_pre_digest(&proof.first_header)?; let second_pre_digest = find_pre_digest(&proof.second_header)?; - // both headers must be targetting the same slot and it must + // both headers must be targeting the same slot and it must // be the same as the one in the proof.
if proof.slot != first_pre_digest.slot() || first_pre_digest.slot() != second_pre_digest.slot() diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index fbcc6e0c1048a55f405e479ed9a195154f6fe823..a16d943b91469d0d24ba6453e6b08a6385ec2f36 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } @@ -26,7 +26,7 @@ sp-io = { path = "../../io", default-features = false } sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } sp-keystore = { path = "../../keystore", default-features = false } -strum = { version = "0.24.1", features = ["derive"], default-features = false } +strum = { version = "0.26.2", features = ["derive"], default-features = false } lazy_static = { version = "1.4.0", optional = true } [dev-dependencies] diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index b3f62ead736c4921d1fa941b19902c8c234c8f78..70978ca559dd2b50041c45d9cab3c874e8bcc852 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -200,7 +200,7 @@ pub mod ecdsa_bls_crypto { fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool { // We can not simply call // `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())` - // because that invokes ECDSA default verification which perfoms Blake2b hash + // because that invokes ECDSA default verification which performs Blake2b hash // which we don't want. This is because ECDSA signatures are meant to be verified // on Ethereum network where Keccak hasher is significantly cheaper than Blake2b. // See Figure 3 of [OnSc21](https://www.scitepress.org/Papers/2021/106066/106066.pdf) diff --git a/substrate/primitives/consensus/beefy/src/mmr.rs b/substrate/primitives/consensus/beefy/src/mmr.rs index 74851ece7acf2d47c540f3d14f3c388ae0eb22f1..0bc303d51c01409253ac2de1a34430fd085b7f1a 100644 --- a/substrate/primitives/consensus/beefy/src/mmr.rs +++ b/substrate/primitives/consensus/beefy/src/mmr.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! BEEFY + MMR utilties. +//! BEEFY + MMR utilities. //! //! While BEEFY can be used completely independently as an additional consensus gadget, //! it is designed around a main use case of bridging standalone networks together. @@ -77,7 +77,7 @@ pub struct MmrLeaf { /// /// Given that adding new struct elements in SCALE is backward compatible (i.e. old format can be /// still decoded, the new fields will simply be ignored). We expect the major version to be bumped -/// very rarely (hopefuly never). +/// very rarely (hopefully never). 
#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct MmrLeafVersion(u8); impl MmrLeafVersion { diff --git a/substrate/primitives/consensus/beefy/src/payload.rs b/substrate/primitives/consensus/beefy/src/payload.rs index dff017b49e091b28151c7e400e8d91062e6f05f2..1a06e620e7ad400ed10c5451d453f403da1f3688 100644 --- a/substrate/primitives/consensus/beefy/src/payload.rs +++ b/substrate/primitives/consensus/beefy/src/payload.rs @@ -43,7 +43,7 @@ pub mod known_payloads { pub struct Payload(Vec<(BeefyPayloadId, Vec<u8>)>); impl Payload { - /// Construct a new payload given an initial vallue + /// Construct a new payload given an initial value pub fn from_single_entry(id: BeefyPayloadId, value: Vec<u8>) -> Self { Self(vec![(id, value)]) } } diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 048e31b0265f57f0404b3777897a27f42fe3de68..90aeadd5055e6e17107b70070ca30e17dd6b1c7c 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -17,8 +17,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.74" -futures = { version = "0.3.21", features = ["thread-pool"] } +async-trait = "0.1.79" +futures = { version = "0.3.30", features = ["thread-pool"] } log = { workspace = true, default-features = true } thiserror = { workspace = true } sp-core = { path = "../../core" } @@ -27,7 +27,7 @@ sp-runtime = { path = "../../runtime" } sp-state-machine = { path = "../../state-machine" } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" sp-test-primitives = { path = "../../test-primitives" } [features] diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs index 6505d005deb8df42c04252be85afd7e7b55924b9..01d3b7a24f9c143201f07092c7484e72f513d8f0 100644 --- a/substrate/primitives/consensus/common/src/lib.rs +++ b/substrate/primitives/consensus/common/src/lib.rs @@ -182,7 +182,7 @@ pub trait Proposer<B: BlockT> { + Send + Unpin + 'static; - /// The supported proof recording by the implementator of this trait. See [`ProofRecording`] + /// The supported proof recording by the implementor of this trait. See [`ProofRecording`] /// for more information. type ProofRecording: self::ProofRecording + Send + Sync + 'static; /// The proof type used by [`Self::ProofRecording`].
diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 1f2da55c5a1668b10902e71297e1b12a121d6f41..6c228383d003fbafa879a88577b37609ce9d0278 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 085709d4c8b5be184a3eac016d20b626db6a8528..07304ed9b2401bfb40ac321ba476be0fd304d14e 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs index dc0a61990d3ea7248a4bbbbf69a6eed72942719f..345de99be28d88ec5125067f0eaaa838b13373fd 100644 --- a/substrate/primitives/consensus/sassafras/src/ticket.rs +++ b/substrate/primitives/consensus/sassafras/src/ticket.rs @@ -115,7 +115,7 @@ mod tests { let threshold = ticket_id_threshold(redundancy, slots, attempts, validators); let threshold = threshold as f64 / TicketId::MAX as f64; - // We expect that the total number of tickets allowed to be submited + // We expect that the total number of tickets allowed to be submitted // is slots*redundancy let avt = ((attempts * validators) as f64 * threshold) as u32; assert_eq!(avt, slots * redundancy); diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs index 815edb5eb661ed94161f15f34a8f26311640be49..537cff52ab6fb7e42c0309431e5dc4b5a767716a 100644 --- a/substrate/primitives/consensus/sassafras/src/vrf.rs +++ b/substrate/primitives/consensus/sassafras/src/vrf.rs @@ -101,7 +101,7 @@ pub fn make_ticket_id(input: &VrfInput, pre_output: &VrfPreOutput) -> TicketId { u128::from_le_bytes(bytes) } -/// Make revealed key seed from a given VRF input and pre-ouput. +/// Make revealed key seed from a given VRF input and pre-output. /// /// Input should have been obtained via [`revealed_key_input`]. 
/// Pre-output should have been obtained from the input directly using the vrf diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index 94c02dba203dc938062546dabe3cebd32411dbde..a8b12900617960f953726d1b4eac5bf585b1f6bf 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-timestamp = { path = "../../timestamp", default-features = false } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 908f2498de5330c307e164f35c6f9ccfc1aa7b76..833b2af95cd10c5b533f56affb7a50f237d54905 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -38,7 +38,7 @@ sp-std = { path = "../std", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-storage = { path = "../storage", default-features = false } sp-externalities = { path = "../externalities", optional = true } -futures = { version = "0.3.21", optional = true } +futures = { version = "0.3.30", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs index 2e32d0cd86dffea77bb47a2b025d67502a72898b..d44f3c0c87c40dd447ca0e3123b74f9464408666 100644 --- a/substrate/primitives/core/src/address_uri.rs +++ b/substrate/primitives/core/src/address_uri.rs @@ -85,7 +85,7 @@ impl Error { /// Complementary error information. /// -/// Strucutre contains complementary information about parsing address URI string. +/// Structure contains complementary information about parsing address URI string. /// String contains a copy of an original URI string, 0-based integer indicates position of invalid /// character. #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index c9d5f27b47b36795a5fdb3955c4da5bf3a8fdc26..71ee2da5383489a42f1c3a961089b031e2764964 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -20,19 +20,12 @@ //! //! The primitive can operate both as a regular VRF or as an anonymized Ring VRF. 
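The module docs above note that the key can drive both a plain VRF and an anonymized Ring VRF; independent of that, the type also supports ordinary Schnorr signing through the generic `Pair` trait. A minimal sketch, assuming `sp-core` with the `bandersnatch-experimental` feature and a fixed test seed:

use sp_core::{bandersnatch::Pair, crypto::Pair as PairT};

fn main() {
    // Fixed 32-byte seed, for illustration only.
    let pair = Pair::from_seed(b"12345678901234567890123456789012");
    let msg = b"example message";
    let signature = pair.sign(msg);
    // Verification needs only the message and the public key.
    assert!(Pair::verify(&signature, msg, &pair.public()));
}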
-#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; #[cfg(feature = "full_crypto")] use crate::crypto::VrfSecret; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, - VrfPublic, + ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, + PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, VrfPublic, }; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; use bandersnatch_vrfs::{CanonicalSerialize, SecretKey}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; @@ -64,42 +57,10 @@ pub struct BandersnatchTag; /// Bandersnatch public key. pub type Public = PublicBytes; -impl TraitPublic for Public {} - impl CryptoType for Public { type Pair = Pair; } -impl Derive for Public {} - -impl sp_std::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize>(deserializer: D) -> Result { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - /// Bandersnatch signature. /// /// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as transcript @@ -110,18 +71,6 @@ impl CryptoType for Signature { type Pair = Pair; } -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - /// The raw secret seed, which can be used to reconstruct the secret [`Pair`]. type Seed = [u8; SEED_SERIALIZED_SIZE]; @@ -300,7 +249,7 @@ pub mod vrf { /// /// The `transcript` summarizes a set of messages which are defining a particular /// protocol by automating the Fiat-Shamir transform for challenge generation. - /// A good explaination of the topic can be found in Merlin [docs](https://merlin.cool/) + /// A good explanation of the topic can be found in Merlin [docs](https://merlin.cool/) /// /// The `inputs` is a sequence of [`VrfInput`]s which, during the signing procedure, are /// first transformed to [`VrfPreOutput`]s. Both inputs and pre-outputs are then appended to diff --git a/substrate/primitives/core/src/bls.rs b/substrate/primitives/core/src/bls.rs index 9492a14ff0d8224ec4e71c888a1880f3dd9c786e..bb04babb3f180f3728c3a0e8159b725aeb7a2d47 100644 --- a/substrate/primitives/core/src/bls.rs +++ b/substrate/primitives/core/src/bls.rs @@ -23,17 +23,11 @@ //! Chaum-Pedersen proof uses the same hash-to-field specified in RFC 9380 for the field of the BLS //! curve. 
-#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - CryptoType, Derive, DeriveError, DeriveJunction, Pair as TraitPair, Public as TraitPublic, - PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, + CryptoType, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, SecretStringError, + SignatureBytes, UncheckedFrom, }; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; use sp_std::vec::Vec; use w3f_bls::{ @@ -115,68 +109,6 @@ pub struct BlsTag; /// A public key. pub type Public = PublicBytes; -impl From> for Public { - fn from(x: Pair) -> Self { - x.public() - } -} - -#[cfg(feature = "std")] -impl std::str::FromStr for Public { - type Err = crate::crypto::PublicError; - - fn from_str(s: &str) -> Result { - Self::from_ss58check(s) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -#[cfg(feature = "std")] -impl sp_std::fmt::Debug for Public { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } -} - -#[cfg(not(feature = "std"))] -impl sp_std::fmt::Debug for Public { - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de, T: BlsBound> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl TraitPublic for Public {} - -impl Derive for Public {} - impl CryptoType for Public { type Pair = Pair; } @@ -184,41 +116,6 @@ impl CryptoType for Public { /// A generic BLS signature. pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de, T> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) 
- .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - impl CryptoType for Signature { type Pair = Pair; } @@ -333,8 +230,10 @@ impl CryptoType for Pair { // Test set exercising the BLS12-377 implementation #[cfg(test)] -mod test { +mod tests { use super::*; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; use crate::crypto::DEV_PHRASE; use bls377::{Pair, Signature}; diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index a2bdc6ed58e2b031c379ce27ca86e2fe6a437fc1..b13899fff5176eb76defa5ef32f840ad2c8c0ceb 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -485,8 +485,11 @@ pub trait ByteArray: AsRef<[u8]> + AsMut<[u8]> + for<'a> TryFrom<&'a [u8], Error } } -/// Trait suitable for typical cryptographic key public type. -pub trait Public: CryptoType + ByteArray + Derive + PartialEq + Eq + Clone + Send + Sync {} +/// Trait suitable for cryptographic public keys. +pub trait Public: CryptoType + ByteArray + PartialEq + Eq + Clone + Send + Sync + Derive {} + +/// Trait suitable for cryptographic signatures. +pub trait Signature: CryptoType + ByteArray + PartialEq + Eq + Clone + Send + Sync {} /// An opaque 32-byte cryptographic identifier. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -648,32 +651,11 @@ pub use self::dummy::*; mod dummy { use super::*; - /// Dummy cryptography. Doesn't do anything. - #[derive(Clone, Hash, Default, Eq, PartialEq)] - pub struct Dummy; - - impl AsRef<[u8]> for Dummy { - fn as_ref(&self) -> &[u8] { - &b""[..] - } - } + #[doc(hidden)] + pub struct DummyTag; - impl AsMut<[u8]> for Dummy { - fn as_mut(&mut self) -> &mut [u8] { - unsafe { - #[allow(mutable_transmutes)] - sp_std::mem::transmute::<_, &'static mut [u8]>(&b""[..]) - } - } - } - - impl<'a> TryFrom<&'a [u8]> for Dummy { - type Error = (); - - fn try_from(_: &'a [u8]) -> Result { - Ok(Self) - } - } + /// Dummy cryptography. Doesn't do anything. 
+ pub type Dummy = CryptoBytes<0, DummyTag>; impl CryptoType for Dummy { type Pair = Dummy; @@ -681,21 +663,10 @@ mod dummy { impl Derive for Dummy {} - impl ByteArray for Dummy { - const LEN: usize = 0; - fn from_slice(_: &[u8]) -> Result { - Ok(Self) - } - #[cfg(feature = "std")] - fn to_raw_vec(&self) -> Vec { - vec![] - } - fn as_slice(&self) -> &[u8] { - b"" - } - } impl Public for Dummy {} + impl Signature for Dummy {} + impl Pair for Dummy { type Public = Dummy; type Seed = Dummy; @@ -716,15 +687,15 @@ mod dummy { _: Iter, _: Option, ) -> Result<(Self, Option), DeriveError> { - Ok((Self, None)) + Ok((Self::default(), None)) } fn from_seed_slice(_: &[u8]) -> Result { - Ok(Self) + Ok(Self::default()) } fn sign(&self, _: &[u8]) -> Self::Signature { - Self + Self::default() } fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { @@ -732,11 +703,11 @@ mod dummy { } fn public(&self) -> Self::Public { - Self + Self::default() } fn to_raw_vec(&self) -> Vec { - vec![] + Default::default() } } } @@ -845,7 +816,7 @@ pub trait Pair: CryptoType + Sized { /// The type used to represent a signature. Can be created from a key pair and a message /// and verified with the message and a public key. - type Signature: AsRef<[u8]>; + type Signature: Signature; /// Generate new secure (random) key pair. /// @@ -1218,6 +1189,8 @@ mod tests { use super::*; use crate::DeriveJunction; + struct TestCryptoTag; + #[derive(Clone, Eq, PartialEq, Debug)] enum TestPair { Generated, @@ -1226,59 +1199,33 @@ mod tests { Standard { phrase: String, password: Option, path: Vec }, Seed(Vec), } + impl Default for TestPair { fn default() -> Self { TestPair::Generated } } + impl CryptoType for TestPair { type Pair = Self; } - #[derive(Clone, PartialEq, Eq, Hash, Default)] - struct TestPublic; - impl AsRef<[u8]> for TestPublic { - fn as_ref(&self) -> &[u8] { - &[] - } - } - impl AsMut<[u8]> for TestPublic { - fn as_mut(&mut self) -> &mut [u8] { - &mut [] - } - } - impl<'a> TryFrom<&'a [u8]> for TestPublic { - type Error = (); + type TestPublic = PublicBytes<0, TestCryptoTag>; - fn try_from(data: &'a [u8]) -> Result { - Self::from_slice(data) - } - } impl CryptoType for TestPublic { type Pair = TestPair; } - impl Derive for TestPublic {} - impl ByteArray for TestPublic { - const LEN: usize = 0; - fn from_slice(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self) - } else { - Err(()) - } - } - fn as_slice(&self) -> &[u8] { - &[] - } - fn to_raw_vec(&self) -> Vec { - vec![] - } + + type TestSignature = SignatureBytes<0, TestCryptoTag>; + + impl CryptoType for TestSignature { + type Pair = TestPair; } - impl Public for TestPublic {} + impl Pair for TestPair { type Public = TestPublic; type Seed = [u8; 8]; - type Signature = [u8; 0]; + type Signature = TestSignature; fn generate() -> (Self, ::Seed) { (TestPair::Generated, [0u8; 8]) @@ -1327,7 +1274,7 @@ mod tests { } fn sign(&self, _message: &[u8]) -> Self::Signature { - [] + TestSignature::default() } fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { @@ -1335,7 +1282,7 @@ mod tests { } fn public(&self) -> Self::Public { - TestPublic + TestPublic::default() } fn from_seed_slice(seed: &[u8]) -> Result { diff --git a/substrate/primitives/core/src/crypto_bytes.rs b/substrate/primitives/core/src/crypto_bytes.rs index 069878e165460ace85fbf44507a55b1b6de8fb6b..ee5f3482f743a6a63b3465c75fcdb6d2d82ab6e7 100644 --- a/substrate/primitives/core/src/crypto_bytes.rs +++ b/substrate/primitives/core/src/crypto_bytes.rs @@ -18,15 +18,27 @@ //! 
Generic byte array which can be specialized with a marker type. use crate::{ - crypto::{FromEntropy, UncheckedFrom}, + crypto::{CryptoType, Derive, FromEntropy, Public, Signature, UncheckedFrom}, hash::{H256, H512}, }; use codec::{Decode, Encode, MaxEncodedLen}; use core::marker::PhantomData; use scale_info::TypeInfo; + use sp_runtime_interface::pass_by::{self, PassBy, PassByInner}; +#[cfg(feature = "serde")] +use crate::crypto::Ss58Codec; +#[cfg(feature = "serde")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; + +#[cfg(all(not(feature = "std"), feature = "serde"))] +use sp_std::alloc::{format, string::String}; + +pub use public_bytes::*; +pub use signature_bytes::*; + /// Generic byte array holding some crypto-related raw data. /// /// The type is generic over a constant length `N` and a "tag" `T` which @@ -231,14 +243,137 @@ impl CryptoBytes<64, T> { } } -/// Tag used for generic public key bytes. -pub struct PublicTag; +mod public_bytes { + use super::*; + + /// Tag used for generic public key bytes. + pub struct PublicTag; + + /// Generic encoded public key. + pub type PublicBytes = CryptoBytes; + + impl Derive for PublicBytes where Self: CryptoType {} + + impl Public for PublicBytes where Self: CryptoType {} + + impl sp_std::fmt::Debug for PublicBytes + where + Self: CryptoType, + { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s = self.to_ss58check(); + write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } + } + + #[cfg(feature = "std")] + impl std::fmt::Display for PublicBytes + where + Self: CryptoType, + { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } + } + + #[cfg(feature = "std")] + impl std::str::FromStr for PublicBytes + where + Self: CryptoType, + { + type Err = crate::crypto::PublicError; + + fn from_str(s: &str) -> Result { + Self::from_ss58check(s) + } + } + + #[cfg(feature = "serde")] + impl Serialize for PublicBytes + where + Self: CryptoType, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } + } + + #[cfg(feature = "serde")] + impl<'de, const N: usize, SubTag> Deserialize<'de> for PublicBytes + where + Self: CryptoType, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Self::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } + } +} + +mod signature_bytes { + use super::*; + + /// Tag used for generic signature bytes. + pub struct SignatureTag; + + /// Generic encoded signature. + pub type SignatureBytes = CryptoBytes; -/// Generic encoded public key. -pub type PublicBytes = CryptoBytes; + impl Signature for SignatureBytes where Self: CryptoType {} -/// Tag used for generic signature bytes. 
-pub struct SignatureTag; + #[cfg(feature = "serde")] + impl Serialize for SignatureBytes + where + Self: CryptoType, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&array_bytes::bytes2hex("", self)) + } + } + + #[cfg(feature = "serde")] + impl<'de, const N: usize, SubTag> Deserialize<'de> for SignatureBytes + where + Self: CryptoType, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?; + Self::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } + } -/// Generic encoded signature. -pub type SignatureBytes = CryptoBytes; + impl sp_std::fmt::Debug for SignatureBytes + where + Self: CryptoType, + { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&&self.0[..])) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } + } +} diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index 0e6b06a34dc9bf33cd8d9a80b3d90edb88131266..9cba8cc3d352a0b60317f570827015f4c11d5313 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -17,11 +17,9 @@ //! Simple ECDSA secp256k1 API. -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, PublicBytes, SecretStringError, SignatureBytes, + CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, + SecretStringError, SignatureBytes, }; #[cfg(not(feature = "std"))] @@ -31,10 +29,6 @@ use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, }; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; #[cfg(not(feature = "std"))] use sp_std::vec::Vec; @@ -80,10 +74,6 @@ impl Public { } } -impl TraitPublic for Public {} - -impl Derive for Public {} - #[cfg(feature = "std")] impl From for Public { fn from(pubkey: PublicKey) -> Self { @@ -106,85 +96,9 @@ impl From for Public { } } -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl sp_std::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) 
- .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - /// A signature (a 512-bit value, plus 8 bits for recovery ID). pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - impl Signature { /// Recover the public key from this signature and a message. pub fn recover>(&self, message: M) -> Option { @@ -417,7 +331,7 @@ mod test { use super::*; use crate::crypto::{ set_default_ss58_version, PublicError, Ss58AddressFormat, Ss58AddressFormatRegistry, - DEV_PHRASE, + Ss58Codec, DEV_PHRASE, }; use serde_json; diff --git a/substrate/primitives/core/src/ed25519.rs b/substrate/primitives/core/src/ed25519.rs index 0dda7b95972d6aaff72c3bd628943c3bbf7b2ba9..a9494f2860b4c018d465eecb9eafe825e169292c 100644 --- a/substrate/primitives/core/src/ed25519.rs +++ b/substrate/primitives/core/src/ed25519.rs @@ -17,18 +17,13 @@ //! Simple Ed25519 API. -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, - Public as TraitPublic, PublicBytes, SecretStringError, SignatureBytes, + ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, + PublicBytes, SecretStringError, SignatureBytes, }; use ed25519_zebra::{SigningKey, VerificationKey}; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; + use sp_std::vec::Vec; /// An identifier used to match public keys against ed25519 keys @@ -51,105 +46,9 @@ pub struct Ed25519Tag; /// A public key. 
pub type Public = PublicBytes; -impl TraitPublic for Public {} - -impl Derive for Public {} - -#[cfg(feature = "full_crypto")] -impl From for Public { - fn from(x: Pair) -> Self { - x.public() - } -} - -#[cfg(feature = "std")] -impl std::str::FromStr for Public { - type Err = crate::crypto::PublicError; - - fn from_str(s: &str) -> Result { - Self::from_ss58check(s) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl sp_std::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - /// A signature. pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - /// A key pair. 
#[derive(Copy, Clone)] pub struct Pair { @@ -253,8 +152,10 @@ impl CryptoType for Pair { } #[cfg(test)] -mod test { +mod tests { use super::*; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; use crate::crypto::DEV_PHRASE; use serde_json; diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index cf803c6fb49e3da294ebc8c291ae4478b355ecc4..098bd135bfebba0d83bb2f58e72b4779bdf441b7 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -56,27 +56,27 @@ pub mod const_hex2array; pub mod crypto; pub mod hexdisplay; pub use paste; - mod address_uri; -#[cfg(feature = "bandersnatch-experimental")] -pub mod bandersnatch; -#[cfg(feature = "bls-experimental")] -pub mod bls; -pub mod crypto_bytes; pub mod defer; -pub mod ecdsa; -pub mod ed25519; pub mod hash; #[cfg(feature = "std")] mod hasher; pub mod offchain; -pub mod paired_crypto; -pub mod sr25519; pub mod testing; #[cfg(feature = "std")] pub mod traits; pub mod uint; +#[cfg(feature = "bandersnatch-experimental")] +pub mod bandersnatch; +#[cfg(feature = "bls-experimental")] +pub mod bls; +pub mod crypto_bytes; +pub mod ecdsa; +pub mod ed25519; +pub mod paired_crypto; +pub mod sr25519; + #[cfg(feature = "bls-experimental")] pub use bls::{bls377, bls381}; #[cfg(feature = "bls-experimental")] diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index 27a7ab28dfd88992f6610111e1cc3adc16e43a6c..260e86b6ff9c4d759fd3794c6a91cc77aebd8dc0 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -19,20 +19,13 @@ use core::marker::PhantomData; -#[cfg(feature = "serde")] -use crate::crypto::Ss58Codec; use crate::crypto::{ - ByteArray, CryptoType, Derive, DeriveError, DeriveJunction, Pair as PairT, Public as PublicT, - PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, + ByteArray, CryptoType, DeriveError, DeriveJunction, Pair as PairT, Public as PublicT, + PublicBytes, SecretStringError, Signature as SignatureT, SignatureBytes, UncheckedFrom, }; use sp_std::vec::Vec; -#[cfg(feature = "serde")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(all(not(feature = "std"), feature = "serde"))] -use sp_std::alloc::{format, string::String}; - /// ECDSA and BLS12-377 paired crypto scheme #[cfg(feature = "bls-experimental")] pub mod ecdsa_bls377 { @@ -80,7 +73,7 @@ pub mod ecdsa_bls377 { #[cfg(feature = "full_crypto")] impl Pair { - /// Hashes the `message` with the specified [`Hasher`] before signing sith the ECDSA secret + /// Hashes the `message` with the specified [`Hasher`] before signing with the ECDSA secret /// component. /// /// The hasher does not affect the BLS12-377 component. 
This generates BLS12-377 Signature @@ -173,130 +166,10 @@ where } } -#[cfg(feature = "std")] -impl std::fmt::Display - for Public -where - Public: CryptoType, -{ - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl sp_std::fmt::Debug - for Public -where - Public: CryptoType, - [u8; LEFT_PLUS_RIGHT_LEN]: crate::hexdisplay::AsBytesRef, -{ - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize - for Public -where - Public: CryptoType, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -#[cfg(feature = "serde")] -impl<'de, const LEFT_PLUS_RIGHT_LEN: usize, SubTag: PairedCryptoSubTagBound> Deserialize<'de> - for Public -where - Public: CryptoType, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - -impl PublicT - for Public -where - Public: CryptoType, -{ -} - -impl Derive - for Public -{ -} - -/// Trait characterizing a signature which could be used as individual component of an -/// `paired_crypto:Signature` pair. -pub trait SignatureBound: ByteArray {} - -impl SignatureBound for T {} - /// A pair of signatures of different types pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize - for Signature -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de, const LEFT_PLUS_RIGHT_LEN: usize, SubTag> Deserialize<'de> - for Signature -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = array_bytes::hex2bytes(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::::try_from(bytes.as_ref()).map_err(|e| { - de::Error::custom(format!("Error converting deserialized data into signature: {:?}", e)) - }) - } -} - -impl sp_std::fmt::Debug - for Signature -where - [u8; LEFT_PLUS_RIGHT_LEN]: crate::hexdisplay::AsBytesRef, -{ - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - /// A key pair. 
pub struct Pair< LeftPair: PairT, @@ -332,9 +205,8 @@ impl< > PairT for Pair where Pair: CryptoType, - LeftPair::Signature: SignatureBound, - RightPair::Signature: SignatureBound, - Public: CryptoType, + Public: PublicT, + Signature: SignatureT, LeftPair::Seed: From + Into, RightPair::Seed: From + Into, { @@ -417,14 +289,14 @@ where // Test set exercising the (ECDSA,BLS12-377) implementation #[cfg(all(test, feature = "bls-experimental"))] -mod test { +mod tests { use super::*; - use crate::{crypto::DEV_PHRASE, KeccakHasher}; + #[cfg(feature = "serde")] + use crate::crypto::Ss58Codec; + use crate::{bls377, crypto::DEV_PHRASE, ecdsa, KeccakHasher}; use codec::{Decode, Encode}; use ecdsa_bls377::{Pair, Signature}; - use crate::{bls377, ecdsa}; - #[test] fn test_length_of_paired_ecdsa_and_bls377_public_key_and_signature_is_correct() { assert_eq!( diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs index ee0546bc5341eb1525b65c72ffa20cfb452bbee9..54b9a98db3d2ca2b69af5ab77701e4e8c0b70a83 100644 --- a/substrate/primitives/core/src/sr25519.rs +++ b/substrate/primitives/core/src/sr25519.rs @@ -22,7 +22,9 @@ #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; -use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; +use crate::crypto::{ + CryptoBytes, DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, +}; #[cfg(feature = "full_crypto")] use schnorrkel::signing_context; use schnorrkel::{ @@ -31,9 +33,7 @@ use schnorrkel::{ }; use sp_std::vec::Vec; -use crate::crypto::{ - CryptoType, CryptoTypeId, Derive, Public as TraitPublic, PublicBytes, SignatureBytes, -}; +use crate::crypto::{CryptoType, CryptoTypeId, Derive, Public as TraitPublic, SignatureBytes}; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -59,20 +59,28 @@ pub const SIGNATURE_SERIALIZED_SIZE: usize = 64; #[doc(hidden)] pub struct Sr25519Tag; +#[doc(hidden)] +pub struct Sr25519PublicTag; /// An Schnorrkel/Ristretto x25519 ("sr25519") public key. -pub type Public = PublicBytes; +pub type Public = CryptoBytes; -/// An Schnorrkel/Ristretto x25519 ("sr25519") key pair. -pub struct Pair(Keypair); +impl TraitPublic for Public {} -impl Clone for Pair { - fn clone(&self) -> Self { - Pair(schnorrkel::Keypair { - public: self.0.public, - secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) - .expect("key is always the correct size; qed"), - }) +impl Derive for Public { + /// Derive a child key from a series of given junctions. + /// + /// `None` if there are any hard junctions in there. + #[cfg(feature = "serde")] + fn derive>(&self, path: Iter) -> Option { + let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; + for j in path { + match j { + DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, + DeriveJunction::Hard(_cc) => return None, + } + } + Some(Self::from(acc.to_bytes())) } } @@ -129,29 +137,6 @@ impl<'de> Deserialize<'de> for Public { /// An Schnorrkel/Ristretto x25519 ("sr25519") signature. pub type Signature = SignatureBytes; -#[cfg(feature = "serde")] -impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&array_bytes::bytes2hex("", self)) - } -} - -#[cfg(feature = "serde")] -impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) 
- .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } -} - #[cfg(feature = "full_crypto")] impl From for Signature { fn from(s: schnorrkel::Signature) -> Signature { @@ -159,37 +144,19 @@ } } -impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} +/// An Schnorrkel/Ristretto x25519 ("sr25519") key pair. +pub struct Pair(Keypair); -impl Derive for Public { - /// Derive a child key from a series of given junctions. - /// - /// `None` if there are any hard junctions in there. - #[cfg(feature = "serde")] - fn derive>(&self, path: Iter) -> Option { - let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; - for j in path { - match j { - DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, - DeriveJunction::Hard(_cc) => return None, - } - } - Some(Self::from(acc.to_bytes())) +impl Clone for Pair { + fn clone(&self) -> Self { + Pair(schnorrkel::Keypair { + public: self.0.public, + secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) + .expect("key is always the correct size; qed"), + }) } } -impl TraitPublic for Public {} - #[cfg(feature = "std")] impl From for Pair { fn from(sec: MiniSecretKey) -> Pair { diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index 6463c423fe7b7e8368c642af8fdf3d88974747ef..c08ac459de530e55d0064561fe7171e811840c8f 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -17,15 +17,15 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { optional = true, workspace = true } sp-runtime = { path = "../runtime", default-features = false, optional = true } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" [features] default = ["std"] diff --git a/substrate/primitives/inherents/src/client_side.rs b/substrate/primitives/inherents/src/client_side.rs index 27479de136f2dedb333883e642d7ddc899446c45..3c299dfa4eea0203e7853a437c712c6618c09b00 100644 --- a/substrate/primitives/inherents/src/client_side.rs +++ b/substrate/primitives/inherents/src/client_side.rs @@ -23,7 +23,7 @@ use sp_runtime::traits::Block as BlockT; /// It is possible for the caller to provide custom arguments to the callee by setting the /// `ExtraArgs` generic parameter. /// -/// The crate already provides some convience implementations of this trait for +/// The crate already provides some convenience implementations of this trait for /// `Box` and closures. So, it should not be required to implement /// this trait manually.
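Because of the blanket implementation for closures mentioned above, callers rarely implement `CreateInherentDataProviders` by hand. A minimal sketch of the closure shape, assuming `sp-core`, `sp-timestamp`, and the boxed error type used by the blanket impl:

use sp_core::H256;

fn main() {
    // A closure from (parent hash, extra args) to a future of providers picks up
    // the blanket `CreateInherentDataProviders` implementation automatically.
    let _create_inherent_data_providers = |_parent: H256, _extra: ()| async {
        Ok::<_, Box<dyn std::error::Error + Send + Sync>>(
            sp_timestamp::InherentDataProvider::from_system_time(),
        )
    };
}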
#[async_trait::async_trait] diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index e47775d56d9e55780c6e18c2d33e132407c83786..dddea4ffa232541023c91d354aa6fb2013332a98 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -85,7 +85,7 @@ disable_allocator = [] # This gives the caller direct programmatic access to the error message. # # When disabled the error message will only be printed out in the -# logs, with the caller receving a generic "wasm `unreachable` instruction executed" +# logs, with the caller receiving a generic "wasm `unreachable` instruction executed" # error message. # # This has no effect if both `disable_panic_handler` and `disable_oom` diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 684854ea5c8d4568c60b8ede7b89cdf4c26a4c38..ec32b7290330fc7a1fd76dc4d7ce86210bcceba0 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -1081,7 +1081,7 @@ pub trait Crypto { /// Register a `ecdsa` signature for batch verification. /// /// Batch verification must be enabled by calling [`start_batch_verify`]. - /// If batch verification is not enabled, the signature will be verified immediatley. + /// If batch verification is not enabled, the signature will be verified immediately. /// To get the result of the batch verification, [`finish_batch_verify`] /// needs to be called. /// @@ -1696,9 +1696,9 @@ mod tracing_setup { /// The PassingTracingSubscriber implements `tracing_core::Subscriber` /// and pushes the information across the runtime interface to the host - struct PassingTracingSubsciber; + struct PassingTracingSubscriber; - impl tracing_core::Subscriber for PassingTracingSubsciber { + impl tracing_core::Subscriber for PassingTracingSubscriber { fn enabled(&self, metadata: &Metadata<'_>) -> bool { wasm_tracing::enabled(Crossing(metadata.into())) } @@ -1731,7 +1731,7 @@ mod tracing_setup { /// set the global bridging subscriber once. 
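The `Crypto` host-function docs above describe a start/register/finish protocol for batch verification. A minimal sketch from the runtime side, assuming the `sp-io` and `sp-core` crates and an externalities environment in which the host functions may be called:

use sp_core::ecdsa;

// Verify a batch of ECDSA signatures in one pass over the host functions.
fn verify_batch(items: &[(ecdsa::Signature, Vec<u8>, ecdsa::Public)]) -> bool {
    sp_io::crypto::start_batch_verify();
    for (signature, message, public) in items {
        // Registration defers the actual check until `finish_batch_verify`.
        sp_io::crypto::ecdsa_batch_verify(signature, message, public);
    }
    // `true` only if every registered signature turned out to be valid.
    sp_io::crypto::finish_batch_verify()
}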
pub fn init_tracing() { if TRACING_SET.load(Ordering::Relaxed) == false { - set_global_default(Dispatch::new(PassingTracingSubsciber {})) + set_global_default(Dispatch::new(PassingTracingSubscriber {})) .expect("We only ever call this once"); TRACING_SET.store(true, Ordering::Relaxed); } diff --git a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml index 940fe90916d25aabef032437f0a6e0a05d270464..7471e9cf8ffa77d0115192f97f9edcc541640794 100644 --- a/substrate/primitives/keyring/Cargo.toml +++ b/substrate/primitives/keyring/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -strum = { version = "0.24.1", features = ["derive"], default-features = false } +strum = { version = "0.26.2", features = ["derive"], default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/maybe-compressed-blob/Cargo.toml b/substrate/primitives/maybe-compressed-blob/Cargo.toml index fa5383d03b10d5c6560e50ff28be7b6e71ffe2b0..178c915ce837efeccad42e52e1cb57bf30d5ff72 100644 --- a/substrate/primitives/maybe-compressed-blob/Cargo.toml +++ b/substrate/primitives/maybe-compressed-blob/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "Handling of blobs, usually Wasm code, which may be compresed" +description = "Handling of blobs, usually Wasm code, which may be compressed" documentation = "https://docs.rs/sp-maybe-compressed-blob" readme = "README.md" diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 9c07f699b37a419c972bfaf34854f376eae5b800..891f893a0c96e5d10cdb953190a1830b31ef56e4 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } serde = { features = ["alloc", "derive"], optional = true, workspace = true } diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index 31c839b5c485e71a09aaf2f4966c0ed8443aaab5..ca8408d0ad97c0c5b880b8b60b59d693e7652b2f 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [features] default = ["std"] diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index b107d20a8e2bfbf55d0d558bb60346cb7a52dd70..b05f26ff55d4e408e1d8459ba5539f44423b2c18 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ 
b/substrate/primitives/metadata-ir/src/types.rs @@ -160,7 +160,7 @@ pub struct ExtrinsicMetadataIR { pub ty: T::Type, /// Extrinsic version. pub version: u8, - /// The type of the address that signes the extrinsic + /// The type of the address that signs the extrinsic pub address_ty: T::Type, /// The type of the outermost Call enum. pub call_ty: T::Type, diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 8ba7f36da43c144de727cba6f8d8427323be4320..07840ca63cb2b8eeaa1773146cf34a74d889b4c8 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { default-features = false, path = "../api" } sp-application-crypto = { default-features = false, path = "../application-crypto" } diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index b0b9890c061918aa25d5cc47129c7d1d29b1300d..afa59af64d6d8599a9835f448f9a2031831dc694 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-arithmetic = { path = "../arithmetic", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/npos-elections/src/pjr.rs b/substrate/primitives/npos-elections/src/pjr.rs index 08e40a014eab79e00836be5c6d323b09b44b8f94..6e3775199a21944a96b908536485009204c14b4d 100644 --- a/substrate/primitives/npos-elections/src/pjr.rs +++ b/substrate/primitives/npos-elections/src/pjr.rs @@ -261,7 +261,7 @@ fn prepare_pjr_input( } } - // Convert Suppports into a SupportMap + // Convert Supports into a SupportMap // // As a flat list, we're limited to linear search. That gives the production of `candidates`, // below, a complexity of `O(s*c)`, where `s == supports.len()` and `c == all_candidates.len()`. diff --git a/substrate/primitives/npos-elections/src/reduce.rs b/substrate/primitives/npos-elections/src/reduce.rs index c9ed493daf30895e86a28016a3ab0762c682b40b..3fd291f88abefada6812c971d379c9934eae7c2f 100644 --- a/substrate/primitives/npos-elections/src/reduce.rs +++ b/substrate/primitives/npos-elections/src/reduce.rs @@ -393,7 +393,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 // voter_root_path.last().unwrap()); TODO: @kian // the common path must be non-void.. 
debug_assert!(common_count > 0); - // and smaller than btoh + // and smaller than both debug_assert!(common_count <= voter_root_path.len()); debug_assert!(common_count <= target_root_path.len()); diff --git a/substrate/primitives/runtime-interface/src/lib.rs b/substrate/primitives/runtime-interface/src/lib.rs index 8b0edf1ec818e57237e6170c8d27f2d1f492cc46..f6ef27789b36f08a6a5e7a58035304cab345737e 100644 --- a/substrate/primitives/runtime-interface/src/lib.rs +++ b/substrate/primitives/runtime-interface/src/lib.rs @@ -283,7 +283,7 @@ pub use sp_std; /// /// `key` holds the pointer and the length to the `data` slice. /// pub fn call(data: &[u8]) -> Vec { /// extern "C" { pub fn ext_call_version_2(key: u64); } -/// // Should call into extenal `ext_call_version_2(<[u8] as IntoFFIValue>::into_ffi_value(key))` +/// // Should call into external `ext_call_version_2(<[u8] as IntoFFIValue>::into_ffi_value(key))` /// // But this is too much to replicate in a doc test so here we just return a dummy vector. /// // Note that we jump into the latest version not marked as `register_only` (i.e. version 2). /// Vec::new() diff --git a/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs index 2f42e60504eb77af41c9107f9a5bb1178ffaf0ea..871a4922ce3ab13e147f1b4f5039021e309b1132 100644 --- a/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -45,19 +45,19 @@ pub fn import_sp_io() { #[runtime_interface] pub trait TestApi { - fn test_versionning(&self, _data: u32) -> bool { + fn test_versioning(&self, _data: u32) -> bool { // should not be called unimplemented!() } } wasm_export_functions! { - fn test_versionning_works() { + fn test_versioning_works() { // old api allows only 42 and 50 - assert!(test_api::test_versionning(42)); - assert!(test_api::test_versionning(50)); + assert!(test_api::test_versioning(42)); + assert!(test_api::test_versioning(50)); - assert!(!test_api::test_versionning(142)); - assert!(!test_api::test_versionning(0)); + assert!(!test_api::test_versioning(142)); + assert!(!test_api::test_versioning(0)); } } diff --git a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs index cf1ff3bca088fa7ba25047f48a452d775679c0fc..2b3fc728f6ff46cc1332e413d8dee965f6ec7e3d 100644 --- a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs @@ -126,21 +126,21 @@ pub trait TestApi { val } - fn test_versionning(&self, data: u32) -> bool { + fn test_versioning(&self, data: u32) -> bool { data == 42 || data == 50 } #[version(2)] - fn test_versionning(&self, data: u32) -> bool { + fn test_versioning(&self, data: u32) -> bool { data == 42 } - fn test_versionning_register_only(&self, data: u32) -> bool { + fn test_versioning_register_only(&self, data: u32) -> bool { data == 80 } #[version(2, register_only)] - fn test_versionning_register_only(&self, data: u32) -> bool { + fn test_versioning_register_only(&self, data: u32) -> bool { data == 42 } @@ -282,21 +282,21 @@ wasm_export_functions! { assert_eq!(0, len); } - fn test_versionning_works() { + fn test_versioning_works() { // we fix new api to accept only 42 as a proper input // as opposed to sp-runtime-interface-test-wasm-deprecated::test_api::verify_input // which accepted 42 and 50. 
- assert!(test_api::test_versionning(42)); + assert!(test_api::test_versioning(42)); - assert!(!test_api::test_versionning(50)); - assert!(!test_api::test_versionning(102)); + assert!(!test_api::test_versioning(50)); + assert!(!test_api::test_versioning(102)); } - fn test_versionning_register_only_works() { + fn test_versioning_register_only_works() { // Ensure that we will import the version of the runtime interface function that // isn't tagged with `register_only`. - assert!(!test_api::test_versionning_register_only(42)); - assert!(test_api::test_versionning_register_only(80)); + assert!(!test_api::test_versioning_register_only(42)); + assert!(test_api::test_versioning_register_only(80)); } fn test_return_input_as_tuple() { diff --git a/substrate/primitives/runtime-interface/test/src/lib.rs b/substrate/primitives/runtime-interface/test/src/lib.rs index 215704a1121543f8290623273a1ad6be8295389d..05a955fbe3f86106ab1e95a67e6c7c0074770ac4 100644 --- a/substrate/primitives/runtime-interface/test/src/lib.rs +++ b/substrate/primitives/runtime-interface/test/src/lib.rs @@ -163,18 +163,18 @@ fn test_array_return_value_memory_is_freed() { } #[test] -fn test_versionining_with_new_host_works() { +fn test_versioning_with_new_host_works() { // We call to the new wasm binary with new host function. - call_wasm_method::(wasm_binary_unwrap(), "test_versionning_works"); + call_wasm_method::(wasm_binary_unwrap(), "test_versioning_works"); // we call to the old wasm binary with a new host functions // old versions of host functions should be called and test should be ok! - call_wasm_method::(wasm_binary_deprecated_unwrap(), "test_versionning_works"); + call_wasm_method::(wasm_binary_deprecated_unwrap(), "test_versioning_works"); } #[test] -fn test_versionining_register_only() { - call_wasm_method::(wasm_binary_unwrap(), "test_versionning_register_only_works"); +fn test_versioning_register_only() { + call_wasm_method::(wasm_binary_unwrap(), "test_versioning_register_only_works"); } fn run_test_in_another_process( diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index cacfc059722942f2fe70d951f8016449914c2e5b..fb5fd60fbbfd90260127fc8ec9d3750badc4b5ba 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -24,7 +24,7 @@ impl-trait-for-tuples = "0.2.2" log = { workspace = true } paste = "1.0" rand = { version = "0.8.5", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-arithmetic = { path = "../arithmetic", default-features = false } @@ -32,7 +32,7 @@ sp-core = { path = "../core", default-features = false } sp-io = { path = "../io", default-features = false } sp-std = { path = "../std", default-features = false } sp-weights = { path = "../weights", default-features = false } -docify = { version = "0.2.7" } +docify = "0.2.8" simple-mermaid = { version = "0.1.1", optional = true } diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 5b54caf597b7396dd5f32b0f695db2e011a9fca1..8f6c0c6f650daa36f18f132e3fc0fa13eaa76621 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ 
b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -40,7 +40,7 @@ use sp_std::{fmt, prelude::*}; /// the decoding fails. const EXTRINSIC_FORMAT_VERSION: u8 = 4; -/// The `SingaturePayload` of `UncheckedExtrinsic`. +/// The `SignaturePayload` of `UncheckedExtrinsic`. type UncheckedSignaturePayload = (Address, Signature, Extra); /// An extrinsic right from the external world. This is unchecked and so can contain a signature. diff --git a/substrate/primitives/runtime/src/offchain/storage_lock.rs b/substrate/primitives/runtime/src/offchain/storage_lock.rs index 56d0eeae527cf072c4b7756690956ab192c5e805..cfdaa954fe5e8e79e276f3e29bd0df7a1cc63267 100644 --- a/substrate/primitives/runtime/src/offchain/storage_lock.rs +++ b/substrate/primitives/runtime/src/offchain/storage_lock.rs @@ -556,7 +556,7 @@ mod tests { let res = lock.try_lock(); assert_eq!(res.is_ok(), false); - // sleep again untill sleep_until > deadline + // sleep again until sleep_until > deadline offchain::sleep_until(offchain::timestamp().add(Duration::from_millis(200))); // the lock has expired, failed to extend it diff --git a/substrate/primitives/runtime/src/runtime_string.rs b/substrate/primitives/runtime/src/runtime_string.rs index aa0bd52e56fe49a96bd2bd4ea87bf0d62ee2be34..607ae59db632f7d5044d48cae4dab4b78d24ad41 100644 --- a/substrate/primitives/runtime/src/runtime_string.rs +++ b/substrate/primitives/runtime/src/runtime_string.rs @@ -61,6 +61,19 @@ impl From<&'static str> for RuntimeString { } } +impl<'a> TryFrom<&'a RuntimeString> for &'a str { + type Error = core::str::Utf8Error; + fn try_from(from: &'a RuntimeString) -> core::result::Result<&'a str, Self::Error> { + match from { + #[cfg(feature = "std")] + RuntimeString::Owned(string) => Ok(string.as_str()), + #[cfg(not(feature = "std"))] + RuntimeString::Owned(vec) => core::str::from_utf8(&vec), + RuntimeString::Borrowed(str) => Ok(str), + } + } +} + #[cfg(feature = "std")] impl From for String { fn from(string: RuntimeString) -> Self { diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index 5f94c834a8f295bee945e9d772019e12ed37c828..b4aeda5a0e7a3e75a44c9d50b4ed22788d1f96e8 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -284,9 +284,9 @@ where } /// The signature payload of a `TestXt`. -type TxSingaturePayload = (u64, Extra); +type TxSignaturePayload = (u64, Extra); -impl SignaturePayload for TxSingaturePayload { +impl SignaturePayload for TxSignaturePayload { type SignatureAddress = u64; type Signature = (); type SignatureExtra = Extra; @@ -299,7 +299,7 @@ impl SignaturePayload for TxSingaturePayload { #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct TestXt { /// Signature of the extrinsic. - pub signature: Option>, + pub signature: Option>, /// Call of the extrinsic. 
pub call: Call, } @@ -348,7 +348,7 @@ impl traits::Extrinsic for TestXt { type Call = Call; - type SignaturePayload = TxSingaturePayload; + type SignaturePayload = TxSignaturePayload; fn is_signed(&self) -> Option { Some(self.signature.is_some()) diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs index 9ee41339d5089f5ec74e5c28a6b50aeba83a8051..5a6c306dd2a10d8a8e133ff89fedb8793f87d66b 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits.rs @@ -330,7 +330,7 @@ impl> Morph for MorphInto { } } -/// Implementation of `TryMorph` which attmepts to convert between types using `TryInto`. +/// Implementation of `TryMorph` which attempts to convert between types using `TryInto`. pub struct TryMorphInto(sp_std::marker::PhantomData); impl> TryMorph for TryMorphInto { type Outcome = T; @@ -1449,7 +1449,7 @@ pub trait Dispatchable { /// to represent the dispatch class and weight. type Info; /// Additional information that is returned by `dispatch`. Can be used to supply the caller - /// with information about a `Dispatchable` that is ownly known post dispatch. + /// with information about a `Dispatchable` that is only known post dispatch. type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; /// Actually dispatch this call and return the result of it. fn dispatch(self, origin: Self::RuntimeOrigin) diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 784228c42e9bf3bd245e77b74e10f78e26bdd53d..cdee4fb03e1254e04e0a068908a380ed28350bee 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", optional = true } diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 6304551b8e60e3e2c9030b2addcc83f3e95b01de..e380abb6a8c070a861b8a84db473a66173aeaacd 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], optional = true, workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index 64aa4692eb592733a87a1bdf3dd2c934516ce102..30d96d0cbafce9f0540ffbb49cf15c7375ea405f 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -117,7 +117,7 @@ pub trait Offence { /// Errors that may happen on offence reports. #[derive(PartialEq, sp_runtime::RuntimeDebug)] pub enum OffenceError { - /// The report has already been sumbmitted. 
+ /// The report has already been submitted. DuplicateReport, /// Other error has happened. diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 73110f55cba2e6a83ab19d59678d533ae4ddc09b..9aa32bc866cfab9e2db44959c93bc4fe0b11e120 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -44,7 +44,7 @@ const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within const BENCHMARKING_FN: &str = "\ This is a special fn only for benchmarking where a database commit happens from the runtime. For that reason client started transactions before calling into runtime are not allowed. - Without client transactions the loop condition garantuees the success of the tx close."; + Without client transactions the loop condition guarantees the success of the tx close."; #[cfg(feature = "std")] fn guard() -> sp_panic_handler::AbortGuard { @@ -722,7 +722,7 @@ impl Encode for EncodeOpaqueValue { } } -/// Auxialiary structure for appending a value to a storage item. +/// Auxiliary structure for appending a value to a storage item. pub(crate) struct StorageAppend<'a>(&'a mut Vec); impl<'a> StorageAppend<'a> { diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 200cebe68de5de33216041802c64ecfd19d8e537..13087431d387b9e986519bbdcabfe9eb3c4fe5c5 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -1451,7 +1451,7 @@ mod tests { enum Item { InitializationItem, DiscardedItem, - CommitedItem, + CommittedItem, } let key = b"events".to_vec(); @@ -1488,21 +1488,21 @@ mod tests { assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); - ext.storage_append(key.clone(), Item::CommitedItem.encode()); + ext.storage_append(key.clone(), Item::CommittedItem.encode()); assert_eq!( ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), + Some(vec![Item::InitializationItem, Item::CommittedItem].encode()), ); } overlay.start_transaction(); - // Then only initlaization item and second (committed) item should persist. + // Then only initialization item and second (committed) item should persist. { let ext = Ext::new(&mut overlay, backend, None); assert_eq!( ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), + Some(vec![Item::InitializationItem, Item::CommittedItem].encode()), ); } } @@ -1866,7 +1866,7 @@ mod tests { // a inner hashable node (&b"k"[..], Some(&long_vec[..])), // need to ensure this is not an inline node - // otherwhise we do not know what is accessed when + // otherwise we do not know what is accessed when // storing proof. (&b"key1"[..], Some(&vec![5u8; 32][..])), (&b"key2"[..], Some(&b"val3"[..])), diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index a25a5b810522d63c4888f973316c4d05dfbf599e..601bc2e29198561d501e1f20b11c2323c0064989 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -47,7 +47,7 @@ pub struct NoOpenTransaction; #[cfg_attr(test, derive(PartialEq))] pub struct AlreadyInRuntime; -/// Error when calling `exit_runtime` when not being in runtime exection mdde. 
+/// Error when calling `exit_runtime` when not being in runtime execution mode. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub struct NotInRuntime; @@ -269,7 +269,7 @@ impl OverlayedMap { /// /// Panics: /// Panics if there are open transactions: `transaction_depth() > 0` - pub fn drain_commited(self) -> impl Iterator { + pub fn drain_committed(self) -> impl Iterator { assert!(self.transaction_depth() == 0, "Drain is not allowed with open transactions."); self.changes.into_iter().map(|(k, mut v)| (k, v.pop_transaction().value)) } @@ -281,7 +281,7 @@ impl OverlayedMap { self.dirty_keys.len() } - /// Call this before transfering control to the runtime. + /// Call this before transferring control to the runtime. /// /// This protects all existing transactions from being removed by the runtime. /// Calling this while already inside the runtime will return an error. @@ -471,7 +471,7 @@ mod test { } fn assert_drained_changes(is: OverlayedChangeSet, expected: Changes) { - let is = is.drain_commited().collect::>(); + let is = is.drain_committed().collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.0.map(From::from))) @@ -480,7 +480,7 @@ mod test { } fn assert_drained(is: OverlayedChangeSet, expected: Drained) { - let is = is.drain_commited().collect::>(); + let is = is.drain_committed().collect::>(); let expected = expected .iter() .map(|(k, v)| (k.to_vec(), v.map(From::from))) @@ -526,7 +526,7 @@ mod test { changeset.set(b"key0".to_vec(), Some(b"val0-rolled".to_vec()), Some(1000)); changeset.set(b"key5".to_vec(), Some(b"val5-rolled".to_vec()), None); - // changes contain all changes not only the commmited ones. + // changes contain all changes not only the committed ones. let all_changes: Changes = vec![ (b"key0", (Some(b"val0-rolled"), vec![1, 10, 1000])), (b"key1", (Some(b"val1"), vec![1])), @@ -807,7 +807,7 @@ mod test { fn drain_with_open_transaction_panics() { let mut changeset = OverlayedChangeSet::default(); changeset.start_transaction(); - let _ = changeset.drain_commited(); + let _ = changeset.drain_committed(); } #[test] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index 039631e4a63038834c502531a1283b0d12ea3f0f..d6fc404e84fb5256656c5849d8f52894f37bfa02 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -480,7 +480,7 @@ impl OverlayedChanges { Ok(()) } - /// Call this before transfering control to the runtime. + /// Call this before transferring control to the runtime. /// /// This protects all existing transactions from being removed by the runtime. /// Calling this while already inside the runtime will return an error. @@ -575,10 +575,10 @@ impl OverlayedChanges { }; use core::mem::take; - let main_storage_changes = take(&mut self.top).drain_commited(); + let main_storage_changes = take(&mut self.top).drain_committed(); let child_storage_changes = take(&mut self.children) .into_iter() - .map(|(key, (val, info))| (key, (val.drain_commited(), info))); + .map(|(key, (val, info))| (key, (val.drain_committed(), info))); let offchain_storage_changes = self.offchain_drain_committed().collect(); @@ -809,7 +809,7 @@ pub struct OverlayedExtensions<'a> { #[cfg(feature = "std")] impl<'a> OverlayedExtensions<'a> { - /// Create a new instance of overalyed extensions from the given extensions. + /// Create a new instance of overlaid extensions from the given extensions. 
pub fn new(extensions: &'a mut Extensions) -> Self { Self { extensions: extensions diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index 0eb7b6d1118f9aa3cff1caee089d526b3f711d94..e19ba95755c1b16f5cc6a2d59a8ede48997707f0 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -417,7 +417,7 @@ mod tests { original_ext.backend.clone().into_storage(), ); - // Ensure all have the correct ref counrt + // Ensure all have the correct ref count assert!(original_ext.backend.backend_storage().keys().values().all(|r| *r == 2)); // Drain the raw storage and root. diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 1824a77c37040b00455379505ad2a542de1c8f89..a1f0057987411fc14d96c0c817659d585c4477f0 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -34,8 +34,8 @@ use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, - read_child_trie_first_descedant_value, read_child_trie_hash, read_child_trie_value, - read_trie_first_descedant_value, read_trie_value, + read_child_trie_first_descendant_value, read_child_trie_hash, read_child_trie_value, + read_trie_first_descendant_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, DBValue, KeySpacedDB, MerkleValue, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, TrieDBRawIterator, TrieRecorder, TrieRecorderProvider, @@ -554,7 +554,7 @@ where let map_e = |e| format!("Trie lookup error: {}", e); self.with_recorder_and_cache(None, |recorder, cache| { - read_trie_first_descedant_value::, _>(self, &self.root, key, recorder, cache) + read_trie_first_descendant_value::, _>(self, &self.root, key, recorder, cache) .map_err(map_e) }) } @@ -570,7 +570,7 @@ where let map_e = |e| format!("Trie lookup error: {}", e); self.with_recorder_and_cache(Some(child_root), |recorder, cache| { - read_child_trie_first_descedant_value::, _>( + read_child_trie_first_descendant_value::, _>( child_info.keyspace(), self, &child_root, diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index 000fcd9870403872b1399466324eb18f728f2b41..b36bff69a007c669724950ca94f2cebc859890ed 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", default-features = false } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/statement-store/src/ecies.rs b/substrate/primitives/statement-store/src/ecies.rs index 80a040fd4c8e03de91bdd851bb635d506c20208f..6fa16658e0071c78d99fd3b88f4bdbca6af551a6 100644 --- a/substrate/primitives/statement-store/src/ecies.rs +++ b/substrate/primitives/statement-store/src/ecies.rs @@ -148,7 +148,7 @@ mod test { #[test] fn 
basic_ed25519_encryption() { let (pair, _) = sp_core::ed25519::Pair::generate(); - let pk = pair.into(); + let pk = pair.public(); let plain_message = b"An important secret message"; let encrypted = encrypt_ed25519(&pk, plain_message).unwrap(); diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index b55cc8f21740509b98f535c3f66e3dc5ece26ead..197994f574719ec0b000b8dbd79f333c71291708 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -452,7 +452,7 @@ impl TryFrom for StateVersion { impl StateVersion { /// If defined, values in state of size bigger or equal /// to this threshold will use a separate trie node. - /// Otherwhise, value will be inlined in branch or leaf + /// Otherwise, value will be inlined in branch or leaf /// node. pub fn state_value_threshold(&self) -> Option { match self { diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index 1b51c5f591904335d547a052a7b035ea3f6cd108..0513555431561a4adb12f5e2b373859df1272af2 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 11afb1755908880fc63a52abb1a659609acddccd..5a1d4fcc985c520eede135b59d8fe5311fd8feac 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } sp-inherents = { path = "../inherents", default-features = false } diff --git a/substrate/primitives/tracing/src/lib.rs b/substrate/primitives/tracing/src/lib.rs index b8b99230db574aea93fce3a3bdcdc2822c602e28..34ed088aed0b9d648f429b6558846332cbed35e3 100644 --- a/substrate/primitives/tracing/src/lib.rs +++ b/substrate/primitives/tracing/src/lib.rs @@ -17,7 +17,7 @@ //! Substrate tracing primitives and macros. //! -//! To trace functions or invidual code in Substrate, this crate provides [`within_span`] +//! To trace functions or individual code in Substrate, this crate provides [`within_span`] //! and [`enter_span`]. See the individual docs for how to use these macros. //! //! Note that to allow traces from wasm execution environment there are @@ -70,7 +70,7 @@ pub use crate::types::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; /// directly as they yield nothing without the feature present. Instead you should use /// `enter_span!` and `within_span!` – which would strip away even any parameter conversion /// you do within the span-definition (and thus optimise your performance). 
For your -/// convineience you directly specify the `Level` and name of the span or use the full +/// convenience you can directly specify the `Level` and name of the span or use the full /// feature set of `span!`/`span_*!` on it: /// /// # Example @@ -98,7 +98,7 @@ pub use crate::types::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; /// This project only provides the macros and facilities to manage tracing /// it doesn't implement the tracing subscriber or backend directly – that is /// up to the developer integrating it into a specific environment. In native -/// this can and must be done through the regular `tracing`-facitilies, please +/// this can and must be done through the regular `tracing`-facilities, please /// see their documentation for details. /// /// On the wasm-side we've adopted a similar approach of having a global @@ -139,7 +139,7 @@ pub fn init_for_tests() { /// Runs given code within a tracing span, measuring it's execution time. /// /// If tracing is not enabled, the code is still executed. Pass in level and name or -/// use any valid `sp_tracing::Span`followe by `;` and the code to execute, +/// use any valid `sp_tracing::Span` followed by `;` and the code to execute, /// /// # Example /// diff --git a/substrate/primitives/tracing/src/types.rs b/substrate/primitives/tracing/src/types.rs index 3692a81e03c8efde1fadc48b74c10481f3632143..46f38383d98063469b9f889ee1debabb34ad8f15 100644 --- a/substrate/primitives/tracing/src/types.rs +++ b/substrate/primitives/tracing/src/types.rs @@ -17,7 +17,7 @@ use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; -/// Types for wasm based tracing. Loosly inspired by `tracing-core` but +/// Types for wasm based tracing. Loosely inspired by `tracing-core` but /// optimised for the specific use case. use core::{fmt::Debug, format_args}; @@ -54,7 +54,7 @@ impl core::default::Default for WasmLevel { } } -/// A paramter value provided to the span/event +/// A parameter value provided to the span/event #[derive(Encode, Decode, Clone)] pub enum WasmValue { U8(u8), @@ -180,9 +180,9 @@ impl From for WasmValue { } } -/// The name of a field provided as the argument name when contstructing an +/// The name of a field provided as the argument name when constructing an /// `event!` or `span!`. -/// Generally generated automaticaly via `stringify` from an `'static &str`. +/// Generally generated automatically via `stringify` from a `&'static str`. /// Likely print-able. #[derive(Encode, Decode, Clone)] pub struct WasmFieldName(Vec); @@ -320,7 +320,7 @@ impl tracing_core::field::Visit for WasmValuesSet { self.0.push((field.name().into(), Some(WasmValue::from(value)))) } } -/// Metadata provides generic information about the specifc location of the +/// Metadata provides generic information about the specific location of the /// `span!` or `event!` call on the wasm-side.
#[derive(Encode, Decode, Clone)] pub struct WasmMetadata { diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index fbd0a4752fca988fbe2e3a18228ad3fb754993dd..137a232fce73b9937158afba0059b05aefea10bc 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -16,9 +16,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.74", optional = true } +async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", optional = true } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/transaction-storage-proof/src/lib.rs b/substrate/primitives/transaction-storage-proof/src/lib.rs index 352f2aec26ee6ad8a9b8be3c5ded5349f6b65585..893b2e33bee6ca21bd900e485d482cb0acba3cc7 100644 --- a/substrate/primitives/transaction-storage-proof/src/lib.rs +++ b/substrate/primitives/transaction-storage-proof/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Storage proof primitives. Constains types and basic code to extract storage +//! Storage proof primitives. Contains types and basic code to extract storage //! proofs for indexed transactions. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index dd7ab080e5f4d3cda170ec6f8c8c6be55b8a46b3..28c496d7a8e0a5fe79a8a049473842f8856a7325 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -29,7 +29,7 @@ memory-db = { version = "0.32.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.28.0", default-features = false } diff --git a/substrate/primitives/trie/src/cache/mod.rs b/substrate/primitives/trie/src/cache/mod.rs index 01f08a78adcf2fff3612b7dfe0aebc83b2b17658..32078169b50305ad08ecaf4b07679a792411dff0 100644 --- a/substrate/primitives/trie/src/cache/mod.rs +++ b/substrate/primitives/trie/src/cache/mod.rs @@ -323,7 +323,7 @@ type ValueAccessSet = /// /// This cache should be used per state instance created by the backend. One state instance is /// referring to the state of one block. It will cache all the accesses that are done to the state -/// which could not be fullfilled by the [`SharedTrieCache`]. These locally cached items are merged +/// which could not be fulfilled by the [`SharedTrieCache`]. These locally cached items are merged /// back to the shared trie cache when this instance is dropped. /// /// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes. 
diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 81b37a0c9a6ca14ec901ff38d53992dd25f8f81f..0ae448aff6e13db074665dbf7500c8a35e407af5 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -326,7 +326,7 @@ pub fn read_trie_value( -pub fn read_trie_first_descedant_value( +pub fn read_trie_first_descendant_value( db: &DB, root: &TrieHash, key: &[u8], @@ -447,7 +447,7 @@ where /// Read the [`trie_db::MerkleValue`] of the node that is the closest descendant for /// the provided child key. -pub fn read_child_trie_first_descedant_value( +pub fn read_child_trie_first_descendant_value( keyspace: &[u8], db: &DB, root: &TrieHash, diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index a94e2322430b421d24b438b2899b67feec9dd059..d686b0c75512dac9931c46ffb41c4e4a11522718 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } thiserror = { optional = true, workspace = true } sp-crypto-hashing-proc-macro = { path = "../crypto/hashing/proc-macro" } diff --git a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs index 7ca2d9b71f60a8059b4b3004ef3c089f3d110002..3671d4aff6bb0d14dd100427340f366b92b5b3a1 100644 --- a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -52,7 +52,7 @@ fn decl_runtime_version_impl_inner(item: ItemConst) -> Result { /// enable `std` feature even for `no_std` wasm runtime builds. /// /// One difference from the original definition is the `apis` field. Since we don't actually parse -/// `apis` from this macro it will always be emitteed as empty. An empty vector can be encoded as +/// `apis` from this macro it will always be emitted as empty. An empty vector can be encoded as /// a zero-byte, thus `u8` is sufficient here. #[derive(Encode)] struct RuntimeVersion { diff --git a/substrate/primitives/version/src/lib.rs b/substrate/primitives/version/src/lib.rs index 9b14a809ac100de9e7a0655fdd7d5e55d23fbc0e..789c507742f779983b478ccd8bc9ff87a87d7165 100644 --- a/substrate/primitives/version/src/lib.rs +++ b/substrate/primitives/version/src/lib.rs @@ -327,7 +327,7 @@ impl RuntimeVersion { /// /// For runtime with core api version less than 4, /// V0 trie version will be applied to state. - /// Otherwhise, V1 trie version will be use. + /// Otherwise, V1 trie version will be used. pub fn state_version(&self) -> StateVersion { // If version > than 1, keep using latest version.
self.state_version.try_into().unwrap_or(StateVersion::V1) diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index c05cc05ff06de9c5d1a96c3f406323e8551f388b..15a20fab5e5d83aa4635a38c43738a3007847481 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -25,5 +25,5 @@ anyhow = { version = "1.0.81", optional = true } [features] default = ["std"] -std = ["codec/std", "log/std", "wasmtime"] +std = ["codec/std", "log/std"] wasmtime = ["anyhow", "dep:wasmtime"] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index d3118defb58d77227507f1718d0d2896712d50a1..e73d4a702b42e4ac5b0cd0a37f1c15eb70bec5e2 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bounded-collections = { version = "0.2.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = "1.11.0" sp-arithmetic = { path = "../arithmetic", default-features = false } diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index af8b01cdef087396a36ccd2b5c14f9c9640443c4..56b1c038199a81fb84c03474f0d7c5bbf5ab2c67 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.16" +futures = "0.3.30" tokio = { version = "1.22.0", features = ["macros", "time"] } [dev-dependencies] diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 349b04d32d7baff5c24323e171ebe373730f464f..4e4e65314fc3d0d1b6af4f0645c81e2a25835c68 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" -async-trait = "0.1.74" +async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } sc-client-api = { path = "../../client/api" } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index f49503da8ca50d8c648e63332c4423b203338639..ffbd59f39ad27800815f237e21b645a050a28c78 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -22,7 +22,7 @@ sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } sp-block-builder = { path = "../../primitives/block-builder", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-inherents = { path = "../../primitives/inherents", default-features = false } sp-keyring = { path = 
"../../primitives/keyring", default-features = false } sp-offchain = { path = "../../primitives/offchain", default-features = false } @@ -53,7 +53,7 @@ array-bytes = { version = "6.1", optional = true } log = { workspace = true } [dev-dependencies] -futures = "0.3.21" +futures = "0.3.30" sc-block-builder = { path = "../../client/block-builder" } sc-chain-spec = { path = "../../client/chain-spec" } sc-executor = { path = "../../client/executor" } diff --git a/substrate/test-utils/runtime/client/Cargo.toml b/substrate/test-utils/runtime/client/Cargo.toml index cbb964f6785237ff45b58f6b702b9a3e4a3abbbe..5ca24fea33edab61162ea1e98a20bf8eb7e2b2cf 100644 --- a/substrate/test-utils/runtime/client/Cargo.toml +++ b/substrate/test-utils/runtime/client/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.21" +futures = "0.3.30" sc-block-builder = { path = "../../../client/block-builder" } sc-client-api = { path = "../../../client/api" } sc-consensus = { path = "../../../client/consensus/common" } diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index 33e56e8e55e7a7479fb1c783af818f3120e1b7f4..9b52706c739915690c7f734c399a3071b30a3630 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -futures = "0.3.21" +futures = "0.3.30" parking_lot = "0.12.1" thiserror = { workspace = true } sc-transaction-pool = { path = "../../../client/transaction-pool" } diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index 6ba515afee175751840edadfb7e6fc2ab01c5e46..a89006d94dc199951ffd5381ff7f51446a21acd6 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -18,7 +18,7 @@ hash-db = { version = "0.16.0", default-features = false } [dev-dependencies] array-bytes = "6.1" -env_logger = "0.9" +env_logger = "0.11" sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/utils/fork-tree/src/lib.rs b/substrate/utils/fork-tree/src/lib.rs index cd175166b9cdf4129b4718592dc2277b8c78103f..ff86467c85d5445a66b3d86cc04f83fcf5a110c7 100644 --- a/substrate/utils/fork-tree/src/lib.rs +++ b/substrate/utils/fork-tree/src/lib.rs @@ -683,7 +683,7 @@ where node.data }); - // Retain only roots that are descendents of the finalized block (this + // Retain only roots that are descendants of the finalized block (this // happens if the node has been properly finalized) or that are // ancestors (or equal) to the finalized block (in this case the node // wasn't finalized earlier presumably because the predicate didn't @@ -1168,7 +1168,7 @@ mod test { Ok(Some(false)), ); - // finalizing "E" is not allowed since there are not finalized anchestors. + // finalizing "E" is not allowed since there are not finalized ancestors. assert_eq!( tree.finalizes_any_with_descendent_if(&"E", 15, &is_descendent_of, |c| c.effective == 10), @@ -1309,7 +1309,7 @@ mod test { fn map_works() { let (mut tree, _) = test_fork_tree(); - // Extend the single root fork-tree to also excercise the roots order during map. + // Extend the single root fork-tree to also exercise the roots order during map. 
let is_descendent_of = |_: &&str, _: &&str| -> Result { Ok(false) }; let is_root = tree.import("A1", 10, 1, &is_descendent_of).unwrap(); assert!(is_root); diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index c6ba282401672335a6a72e698533a4af35670523..80a5d27d8c26285f621912540a22e95682d44ccd 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -415,7 +415,7 @@ impl PalletCmd { .map_err(|e| { format!("Error executing and verifying runtime benchmark: {}", e) })?; - // Dont use these results since verification code will add overhead. + // Don't use these results since verification code will add overhead. let _batch = , String> as Decode>::decode( &mut &result[..], @@ -437,7 +437,7 @@ impl PalletCmd { &pallet.clone(), &extrinsic.clone(), &selected_components.clone(), - false, // dont run verification code for final values + false, // don't run verification code for final values self.repeat, ) .encode(), @@ -469,7 +469,7 @@ impl PalletCmd { &pallet.clone(), &extrinsic.clone(), &selected_components.clone(), - false, // dont run verification code for final values + false, // don't run verification code for final values self.repeat, ) .encode(), diff --git a/substrate/utils/frame/generate-bags/src/lib.rs b/substrate/utils/frame/generate-bags/src/lib.rs index 923017261a44b3ce065dab99a55046ebfdcae594..62485c442d3654441765331f73d52951db014ee9 100644 --- a/substrate/utils/frame/generate-bags/src/lib.rs +++ b/substrate/utils/frame/generate-bags/src/lib.rs @@ -183,7 +183,7 @@ pub fn generate_thresholds( total_issuance: u128, minimum_balance: u128, ) -> Result<(), std::io::Error> { - // ensure the file is accessable + // ensure the file is accessible if let Some(parent) = output.parent() { if !parent.exists() { std::fs::create_dir_all(parent)?; diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 61e0a861ee0ebd847a54c46280d5c3c10fce0ad2..82b019154832aee4202d60d3a1a2b9015b7b774a 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -26,7 +26,7 @@ sp-io = { path = "../../../primitives/io" } sp-runtime = { path = "../../../primitives/runtime" } tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } substrate-rpc-client = { path = "../rpc/client" } -futures = "0.3" +futures = "0.3.30" indicatif = "0.17.7" spinners = "4.1.0" tokio-retry = "0.3.0" diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index c7399468da9da7e9e0d692ec659db3076a2d5a79..254d33deec80d777eaba930e06cd087d9c4d26f1 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -762,7 +762,7 @@ where let mut sp = Spinner::with_timer(Spinners::Dots, "Inserting keys into DB...".into()); let start = Instant::now(); pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| { - // Don't insert the child keys here, they need to be inserted seperately with all their + // Don't insert the child keys here, they need to be inserted separately with all their // data in the load_child_remote function. 
match is_default_child_storage_key(&k.0) { true => None, diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index b51e3f44f4e6457ee2f9e12dedace27728b96ac4..501bb95b257949f2b91678d361d210c440057906 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["ws-client"] } sc-rpc-api = { path = "../../../../client/rpc-api" } -async-trait = "0.1.74" +async-trait = "0.1.79" serde = { workspace = true, default-features = true } sp-runtime = { path = "../../../../primitives/runtime" } log = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index f45258ea593d74c417bcb301c8ca755f0d396945..c0333bb7dac0f19ed30adc108845f88c918c900f 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -132,7 +132,7 @@ pub trait StateMigrationApi { /// Check current migration state. /// /// This call is performed locally without submitting any transactions. Thus executing this - /// won't change any state. Nonetheless it is a VERY costy call that should be + /// won't change any state. Nonetheless it is a VERY costly call that should be /// only exposed to trusted peers. #[method(name = "state_trieMigrationStatus")] fn call(&self, at: Option) -> RpcResult; diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 2e4bb6a105784fafecd1344a909e69dbcbfa0e3f..84db06da7b0d26fa52f066dc7ef9f6d879bcc34c 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -23,9 +23,9 @@ sc-rpc-api = { path = "../../../../client/rpc-api" } sp-storage = { path = "../../../../primitives/storage" } [dev-dependencies] -scale-info = "2.10.0" +scale-info = "2.11.1" jsonrpsee = { version = "0.22", features = ["jsonrpsee-types", "ws-client"] } -tokio = "1.22.0" +tokio = "1.37" sp-core = { path = "../../../../primitives/core" } sp-runtime = { path = "../../../../primitives/runtime" } frame-system = { path = "../../../../frame/system" } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index f9a84a01af82f9cf4746e003990e43cb2515a7bc..9e571bef9d04fd6025990755a2ecae79e180f335 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } -futures = "0.3.21" +futures = "0.3.30" log = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } sc-rpc-api = { path = "../../../../client/rpc-api" } @@ -31,7 +31,7 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] sc-transaction-pool = { path = "../../../../client/transaction-pool" } -tokio = "1.22.0" +tokio = "1.37" assert_matches = "1.3.0" sp-tracing = { path = "../../../../primitives/tracing" } substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml 
b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 3ff57745a2dbea89e0b0e01ac3f24e32ac8126b7..618cb645475d7daa720e94591032ec492f41a6e0 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -37,7 +37,7 @@ sp-weights = { path = "../../../../primitives/weights" } frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true } substrate-rpc-client = { path = "../../rpc/client" } -async-trait = "0.1.74" +async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } hex = { version = "0.4.3", default-features = false } log = { workspace = true, default-features = true } @@ -52,7 +52,7 @@ node-primitives = { path = "../../../../bin/node/primitives" } regex = "1.7.3" substrate-cli-test-utils = { path = "../../../../test-utils/cli" } tempfile = "3.1.0" -tokio = "1.27.0" +tokio = "1.37" [features] try-runtime = [ diff --git a/substrate/utils/substrate-bip39/README.md b/substrate/utils/substrate-bip39/README.md index 368d18f406bb6cf36abad1daa4020882c1a2fe73..e7a80ca5f2cf8c4924cfb2629ce6e3ad97c295cc 100644 --- a/substrate/utils/substrate-bip39/README.md +++ b/substrate/utils/substrate-bip39/README.md @@ -21,12 +21,12 @@ to wallets providing their own dictionaries and checksum mechanism. Issues with to CSPRNG supplied dictionary phrases. 2. Providing own dictionaries felt into the _you ain't gonna need it_ anti-pattern category on day 1. Wallet providers - (be it hardware or software) typically want their products to be compatibile with other wallets so that users can + (be it hardware or software) typically want their products to be compatible with other wallets so that users can migrate to their product without having to migrate all their assets. To achieve the above phrases have to be precisely encoded in _The One True Canonical Encoding_, for which UTF-8 NFKD was chosen. This is largely irrelevant (and even ignored) for English phrases, as they encode to basically just ASCII in -virtualy every character encoding known to mankind, but immedietly becomes a problem for dictionaries that do use +virtually every character encoding known to mankind, but immediately becomes a problem for dictionaries that do use non-ASCII characters. Even if the right encoding is used and implemented correctly, there are still [other caveats present for some non-english dictionaries](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md), such as normalizing spaces to a canonical form, or making some latin based characters equivalent to their base in @@ -34,8 +34,8 @@ dictionary lookups (eg. Spanish `ñ` and `n` are meant to be interchangeable). T headache, and opens doors for disagreements between buggy implementations, breaking compatibility. BIP39 does already provide a form of the mnemonic that is free from all of these issues: the entropy byte array. Since -veryfing the checksum requires that we recover the entropy from which the phrase was generated, no extra work is -actually needed here. Wallet implementators can encode the dictionaries in whatever encoding they find convenient (as +verifying the checksum requires that we recover the entropy from which the phrase was generated, no extra work is +actually needed here. Wallet implementors can encode the dictionaries in whatever encoding they find convenient (as long as they are the standard BIP39 dictionaries), no harm in using UTF-16 string primitives that Java and JavaScript provide. 
Since the dictionary is fixed and known, and the checksum is done on the entropy itself, the exact character encoding used becomes irrelevant, as are the precise codepoints and amount of whitespace around the words. It is thus diff --git a/substrate/utils/substrate-bip39/src/lib.rs b/substrate/utils/substrate-bip39/src/lib.rs index 3673d20faed68e8897b97040c66d726a8877fd7a..5b68bef0c390e364f4c87cbfe0d1b7723b1f54f0 100644 --- a/substrate/utils/substrate-bip39/src/lib.rs +++ b/substrate/utils/substrate-bip39/src/lib.rs @@ -43,7 +43,7 @@ pub enum Error { /// /// Any other length will return an error. /// -/// `password` is analog to BIP39 seed generation itself, with an empty string being defalt. +/// `password` is analogous to BIP39 seed generation itself, with an empty string being default. pub fn mini_secret_from_entropy(entropy: &[u8], password: &str) -> Result { let seed = seed_from_entropy(entropy, password)?; Ok(MiniSecretKey::from_bytes(&seed[..32]).expect("Length is always correct; qed")) } diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 7abd1a202848f6159ef33bb9efe06de61cac797c..bac323e2e6a0905cf1d956589235c196ef2881a4 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] build-helper = "0.1.1" cargo_metadata = "0.15.4" console = "0.15.8" -strum = { version = "0.24.1", features = ["derive"] } +strum = { version = "0.26.2", features = ["derive"] } tempfile = "3.1.0" toml = "0.8.8" walkdir = "2.4.0" diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 5cde48c0950b341cd36f3a5594620be683c799ea..178e499e8f5bb6d7531d73004e2f37e1e9b1d146 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -207,7 +207,7 @@ fn get_cargo_command(target: RuntimeTarget) -> CargoCommand { } else { // If no command before provided us with a cargo that supports our Substrate wasm env, we // try to search one with rustup. If that fails as well, we return the default cargo and let - // the prequisities check fail. + // the prerequisites check fail. get_rustup_command(target).unwrap_or(default_cargo) } } diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 13db9fce43103dabf94346b2a75b64cfeaca22e5..b58e6bfa36b478b91995789a148b042d167d9f3f 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -695,7 +695,7 @@ impl BuildConfiguration { /// "production". It would only contain the builtin profile where the custom profile /// inherits from. This is why we inspect the build path to learn which profile is used. /// - /// When not overriden by a env variable we always default to building wasm with the `Release` + /// When not overridden by an env variable we always default to building wasm with the `Release` /// profile even when the main build uses the debug build. This is because wasm built with the /// `Debug` profile is too slow for normal development activities and almost never intended. /// @@ -704,9 +704,9 @@ impl BuildConfiguration { /// /// # Note /// - /// Can be overriden by setting [`crate::WASM_BUILD_TYPE_ENV`]. + /// Can be overridden by setting [`crate::WASM_BUILD_TYPE_ENV`].
fn detect(target: RuntimeTarget, wasm_project: &Path) -> Self { - let (name, overriden) = if let Ok(name) = env::var(crate::WASM_BUILD_TYPE_ENV) { + let (name, overridden) = if let Ok(name) = env::var(crate::WASM_BUILD_TYPE_ENV) { (name, true) } else { // First go backwards to the beginning of the target directory. @@ -731,14 +731,14 @@ impl BuildConfiguration { (name, false) }; let outer_build_profile = Profile::iter().find(|p| p.directory() == name); - let blob_build_profile = match (outer_build_profile.clone(), overriden) { - // When not overriden by a env variable we default to using the `Release` profile + let blob_build_profile = match (outer_build_profile.clone(), overridden) { + // When not overridden by an env variable we default to using the `Release` profile // for the wasm build even when the main build uses the debug build. This // is because the `Debug` profile is too slow for normal development activities. (Some(Profile::Debug), false) => Profile::Release, - // For any other profile or when overriden we take it at face value. + // For any other profile or when overridden we take it at face value. (Some(profile), _) => profile, - // For non overriden unknown profiles we fall back to `Release`. + // For non-overridden unknown profiles we fall back to `Release`. // This allows us to continue building when a custom profile is used for the // main builds cargo. When explicitly passing a profile via env variable we are // not doing a fallback. diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index 65aa1e2075209733c1277b9dd71a847c5aa26dc9..0668304e50284721dc85968b5ee3102c6c5dbdf2 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "minimal-template-node" -description = "A miniaml Substrate-based Substrate node, ready for hacking." +description = "A minimal Substrate node, ready for hacking."
version = "0.0.0" license = "MIT-0" authors.workspace = true @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } +futures = { version = "0.3.30", features = ["thread-pool"] } futures-timer = "3.0.1" jsonrpsee = { version = "0.22", features = ["server"] } serde_json = { workspace = true, default-features = true } diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index 9982e5ea53bc3eff28fb0331cfb14ad41a15eec7..a85391f2942143aa361b316971949f4c83ba4a63 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } frame = { path = "../../../../substrate/frame", default-features = false, features = [ diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 20ffb706eb4992b71a7b34d638b7c4cdc70237fd..a99a1e43f85fcaf3e4d2b7122cfca701c6b8b4e4 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -29,7 +29,7 @@ pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-featur pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } -# genesis builder that allows us to interacto with runtime genesis config +# genesis builder that allows us to interact with runtime genesis config sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } # local pallet templates diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 4dd24803e9b124d386d4912236ae1abd42229802..7e7bf1726b5ef1db09c2fa321f1acea5dd0ae1cc 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -6,8 +6,8 @@ use std::{sync::Arc, time::Duration}; use cumulus_client_cli::CollatorOptions; // Local Runtime Types use parachain_template_runtime::{ + apis::RuntimeApi, opaque::{Block, Hash}, - RuntimeApi, }; // Cumulus Imports @@ -16,7 +16,8 @@ use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImpo use cumulus_client_consensus_proposer::Proposer; use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, - BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams, + BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainHostFunctions, + StartRelayChainTasksParams, }; use cumulus_primitives_core::{relay_chain::CollatorPair, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; @@ -25,9 +26,7 @@ use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use sc_client_api::Backend; use sc_consensus::ImportQueue; -use sc_executor::{ - HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, -}; +use 
sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sc_network::NetworkBlock; use sc_network_sync::SyncingService; use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}; @@ -36,25 +35,7 @@ use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_keystore::KeystorePtr; use substrate_prometheus_endpoint::Registry; -/// Native executor type. -pub struct ParachainNativeExecutor; - -impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { - type ExtendHostFunctions = ( - cumulus_client_service::storage_proof_size::HostFunctions, - frame_benchmarking::benchmarking::HostFunctions, - ); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - parachain_template_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - parachain_template_runtime::native_version() - } -} - -type ParachainExecutor = NativeElseWasmExecutor; +type ParachainExecutor = WasmExecutor; type ParachainClient = TFullClient; @@ -92,7 +73,7 @@ pub fn new_partial(config: &Configuration) -> Result .default_heap_pages .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ }); - let wasm = WasmExecutor::builder() + let executor = ParachainExecutor::builder() .with_execution_method(config.wasm_method) .with_onchain_heap_alloc_strategy(heap_pages) .with_offchain_heap_alloc_strategy(heap_pages) @@ -100,8 +81,6 @@ pub fn new_partial(config: &Configuration) -> Result .with_runtime_cache_size(config.runtime_cache_size) .build(); - let executor = ParachainExecutor::new_with_wasm_executor(wasm); - let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts_record_import::( config, diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index 89eb9d5171630459e2e313ad94cee53e97921cac..199da2f12d2c691ba6d51d009c6b20734b1c4c46 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 41a510c5ed35dd2c4d9c6a8266edc2d4a10a8f11..0d985796a11e880123f9b7ca71bf123fe1b48184 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } smallvec = "1.11.0" diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs new file mode 100644 index 0000000000000000000000000000000000000000..74c7476e15208cc49fc98056a800180254b0e227 --- /dev/null +++ b/templates/parachain/runtime/src/apis.rs @@ -0,0 +1,275 @@ +// This is free and unencumbered software released into the public domain. 
diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs
new file mode 100644
index 0000000000000000000000000000000000000000..74c7476e15208cc49fc98056a800180254b0e227
--- /dev/null
+++ b/templates/parachain/runtime/src/apis.rs
@@ -0,0 +1,275 @@
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+//
+// In jurisdictions that recognize copyright laws, the author or authors
+// of this software dedicate any and all copyright interest in the
+// software to the public domain. We make this dedication for the benefit
+// of the public at large and to the detriment of our heirs and
+// successors. We intend this dedication to be an overt act of
+// relinquishment in perpetuity of all present and future rights to this
+// software under copyright law.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+//
+// For more information, please refer to <https://unlicense.org>
+
+// External crates imports
+use frame_support::{
+	genesis_builder_helper::{build_config, create_default_config},
+	weights::Weight,
+};
+use pallet_aura::Authorities;
+use sp_api::impl_runtime_apis;
+use sp_consensus_aura::sr25519::AuthorityId as AuraId;
+use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
+use sp_runtime::{
+	traits::Block as BlockT,
+	transaction_validity::{TransactionSource, TransactionValidity},
+	ApplyExtrinsicResult,
+};
+use sp_std::prelude::Vec;
+use sp_version::RuntimeVersion;
+
+// Local module imports
+use super::{
+	AccountId, Aura, Balance, Block, Executive, InherentDataExt, Nonce, ParachainSystem, Runtime,
+	RuntimeCall, RuntimeGenesisConfig, SessionKeys, System, TransactionPayment, VERSION,
+};
+
+impl_runtime_apis! {
+	impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
+		fn slot_duration() -> sp_consensus_aura::SlotDuration {
+			sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
+		}
+
+		fn authorities() -> Vec<AuraId> {
+			Authorities::<Runtime>::get().into_inner()
+		}
+	}
+
+	impl sp_api::Core<Block> for Runtime {
+		fn version() -> RuntimeVersion {
+			VERSION
+		}
+
+		fn execute_block(block: Block) {
+			Executive::execute_block(block)
+		}
+
+		fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
+			Executive::initialize_block(header)
+		}
+	}
+
+	impl sp_api::Metadata<Block> for Runtime {
+		fn metadata() -> OpaqueMetadata {
+			OpaqueMetadata::new(Runtime::metadata().into())
+		}
+
+		fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
+			Runtime::metadata_at_version(version)
+		}
+
+		fn metadata_versions() -> sp_std::vec::Vec<u32> {
+			Runtime::metadata_versions()
+		}
+	}
+
+	impl sp_block_builder::BlockBuilder<Block> for Runtime {
+		fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
+			Executive::apply_extrinsic(extrinsic)
+		}
+
+		fn finalize_block() -> <Block as BlockT>::Header {
+			Executive::finalize_block()
+		}
+
+		fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
+			data.create_extrinsics()
+		}
+
+		fn check_inherents(
+			block: Block,
+			data: sp_inherents::InherentData,
+		) -> sp_inherents::CheckInherentsResult {
+			data.check_extrinsics(&block)
+		}
+	}
+
+	impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
+		fn validate_transaction(
+			source: TransactionSource,
+			tx: <Block as BlockT>::Extrinsic,
+			block_hash: <Block as BlockT>::Hash,
+		) -> TransactionValidity {
+			Executive::validate_transaction(source, tx, block_hash)
+		}
+	}
+
+	impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
+		fn offchain_worker(header: &<Block as BlockT>::Header) {
+			Executive::offchain_worker(header)
+		}
+	}
+
+	impl sp_session::SessionKeys<Block> for Runtime {
+		fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
+			SessionKeys::generate(seed)
+		}
+
+		fn decode_session_keys(
+			encoded: Vec<u8>,
+		) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
+			SessionKeys::decode_into_raw_public_keys(&encoded)
+		}
+	}
+
+	impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
+		fn account_nonce(account: AccountId) -> Nonce {
+			System::account_nonce(account)
+		}
+	}
+
+	impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
+		fn query_info(
+			uxt: <Block as BlockT>::Extrinsic,
+			len: u32,
+		) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
+			TransactionPayment::query_info(uxt, len)
+		}
+		fn query_fee_details(
+			uxt: <Block as BlockT>::Extrinsic,
+			len: u32,
+		) -> pallet_transaction_payment::FeeDetails<Balance> {
+			TransactionPayment::query_fee_details(uxt, len)
+		}
+		fn query_weight_to_fee(weight: Weight) -> Balance {
+			TransactionPayment::weight_to_fee(weight)
+		}
+		fn query_length_to_fee(length: u32) -> Balance {
+			TransactionPayment::length_to_fee(length)
+		}
+	}
+
+	impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi<Block, Balance, RuntimeCall>
+		for Runtime
+	{
+		fn query_call_info(
+			call: RuntimeCall,
+			len: u32,
+		) -> pallet_transaction_payment::RuntimeDispatchInfo<Balance> {
+			TransactionPayment::query_call_info(call, len)
+		}
+		fn query_call_fee_details(
+			call: RuntimeCall,
+			len: u32,
+		) -> pallet_transaction_payment::FeeDetails<Balance> {
+			TransactionPayment::query_call_fee_details(call, len)
+		}
+		fn query_weight_to_fee(weight: Weight) -> Balance {
+			TransactionPayment::weight_to_fee(weight)
+		}
+		fn query_length_to_fee(length: u32) -> Balance {
+			TransactionPayment::length_to_fee(length)
+		}
+	}
+
+	impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
+		fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo {
+			ParachainSystem::collect_collation_info(header)
+		}
+	}
+
+	#[cfg(feature = "try-runtime")]
+	impl frame_try_runtime::TryRuntime<Block> for Runtime {
+		fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) {
+			use super::configs::RuntimeBlockWeights;
+
+			let weight = Executive::try_runtime_upgrade(checks).unwrap();
+			(weight, RuntimeBlockWeights::get().max_block)
+		}
+
+		fn execute_block(
+			block: Block,
+			state_root_check: bool,
+			signature_check: bool,
+			select: frame_try_runtime::TryStateSelect,
+		) -> Weight {
+			// NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to
+			// have a backtrace here.
+			Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap()
+		}
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	impl frame_benchmarking::Benchmark<Block> for Runtime {
+		fn benchmark_metadata(extra: bool) -> (
+			Vec<frame_benchmarking::BenchmarkList>,
+			Vec<frame_support::traits::StorageInfo>,
+		) {
+			use frame_benchmarking::{Benchmarking, BenchmarkList};
+			use frame_support::traits::StorageInfoTrait;
+			use frame_system_benchmarking::Pallet as SystemBench;
+			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
+			use super::*;
+
+			let mut list = Vec::<BenchmarkList>::new();
+			list_benchmarks!(list, extra);
+
+			let storage_info = AllPalletsWithSystem::storage_info();
+			(list, storage_info)
+		}
+
+		fn dispatch_benchmark(
+			config: frame_benchmarking::BenchmarkConfig
+		) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
+			use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch};
+			use super::*;
+
+			use frame_system_benchmarking::Pallet as SystemBench;
+			impl frame_system_benchmarking::Config for Runtime {
+				fn setup_set_code_requirements(code: &sp_std::vec::Vec<u8>) -> Result<(), BenchmarkError> {
+					ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32);
+					Ok(())
+				}
+
+				fn verify_set_code() {
+					System::assert_last_event(cumulus_pallet_parachain_system::Event::<Runtime>::ValidationFunctionStored.into());
+				}
+			}
+
+			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
+			impl cumulus_pallet_session_benchmarking::Config for Runtime {}
+
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist = AllPalletsWithSystem::whitelisted_storage_keys();
+
+			let mut batches = Vec::<BenchmarkBatch>::new();
+			let params = (&config, &whitelist);
+			add_benchmarks!(params, batches);
+
+			if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
+			Ok(batches)
+		}
+	}
+
+	impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
+		fn create_default_config() -> Vec<u8> {
+			create_default_config::<RuntimeGenesisConfig>()
+		}
+
+		fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result {
+			build_config::<RuntimeGenesisConfig>(config)
+		}
+	}
+}
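The new `apis.rs` only implements traits: every `impl` block inside `impl_runtime_apis!` corresponds to a trait declared with `sp_api::decl_runtime_apis!` in the respective crate. A rough sketch of that declare/implement pairing; `TemplateApi` and its `answer` method are hypothetical, not part of the template:

    // Sketch only: how the traits implemented in `apis.rs` come into
    // existence elsewhere. `decl_runtime_apis!` adds the `Block` generic.
    sp_api::decl_runtime_apis! {
        /// A hypothetical runtime API, for illustration.
        pub trait TemplateApi {
            fn answer() -> u32;
        }
    }

    // Inside the runtime's `impl_runtime_apis!` block the matching
    // implementation would then read:
    //
    //     impl self::TemplateApi<Block> for Runtime {
    //         fn answer() -> u32 { 42 }
    //     }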
diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e2c51e07d37b9fc453865f161f37aa2365d07ed5
--- /dev/null
+++ b/templates/parachain/runtime/src/configs/mod.rs
@@ -0,0 +1,311 @@
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+//
+// In jurisdictions that recognize copyright laws, the author or authors
+// of this software dedicate any and all copyright interest in the
+// software to the public domain. We make this dedication for the benefit
+// of the public at large and to the detriment of our heirs and
+// successors. We intend this dedication to be an overt act of
+// relinquishment in perpetuity of all present and future rights to this
+// software under copyright law.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+//
+// For more information, please refer to <https://unlicense.org>
+
+mod xcm_config;
+
+// Substrate and Polkadot dependencies
+use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases;
+use cumulus_primitives_core::{AggregateMessageOrigin, ParaId};
+use frame_support::{
+	derive_impl,
+	dispatch::DispatchClass,
+	parameter_types,
+	traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin},
+	weights::{ConstantMultiplier, Weight},
+	PalletId,
+};
+use frame_system::{
+	limits::{BlockLength, BlockWeights},
+	EnsureRoot,
+};
+use pallet_xcm::{EnsureXcm, IsVoiceOfBody};
+use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling};
+use polkadot_runtime_common::{
+	xcm_sender::NoPriceForMessageDelivery, BlockHashCount, SlowAdjustingFeeUpdate,
+};
+use sp_consensus_aura::sr25519::AuthorityId as AuraId;
+use sp_runtime::Perbill;
+use sp_version::RuntimeVersion;
+use xcm::latest::prelude::BodyId;
+
+// Local module imports
+use super::{
+	weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight},
+	AccountId, Aura, Balance, Balances, Block, BlockNumber, CollatorSelection, Hash, MessageQueue,
+	Nonce, PalletInfo, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeFreezeReason,
+	RuntimeHoldReason, RuntimeOrigin, RuntimeTask, Session, SessionKeys, System, WeightToFee,
+	XcmpQueue, AVERAGE_ON_INITIALIZE_RATIO, BLOCK_PROCESSING_VELOCITY, EXISTENTIAL_DEPOSIT, HOURS,
+	MAXIMUM_BLOCK_WEIGHT, MICROUNIT, NORMAL_DISPATCH_RATIO, RELAY_CHAIN_SLOT_DURATION_MILLIS,
+	SLOT_DURATION, UNINCLUDED_SEGMENT_CAPACITY, VERSION,
+};
+use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin};
+
+parameter_types! {
+	pub const Version: RuntimeVersion = VERSION;
+
+	// This part is copied from Substrate's `bin/node/runtime/src/lib.rs`.
+	// The `RuntimeBlockLength` and `RuntimeBlockWeights` exist here because the
+	// `DeletionWeightLimit` and `DeletionQueueDepth` depend on those to parameterize
+	// the lazy contract deletion.
+	pub RuntimeBlockLength: BlockLength =
+		BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
+	pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder()
+		.base_block(BlockExecutionWeight::get())
+		.for_class(DispatchClass::all(), |weights| {
+			weights.base_extrinsic = ExtrinsicBaseWeight::get();
+		})
+		.for_class(DispatchClass::Normal, |weights| {
+			weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT);
+		})
+		.for_class(DispatchClass::Operational, |weights| {
+			weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT);
+			// Operational transactions have some extra reserved space, so that they
+			// are included even if the block reaches `MAXIMUM_BLOCK_WEIGHT`.
+			weights.reserved = Some(
+				MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT
+			);
+		})
+		.avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO)
+		.build_or_panic();
+	pub const SS58Prefix: u16 = 42;
+}
+
+/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from
+/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`),
+/// but overridden as needed.
+#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig)]
+impl frame_system::Config for Runtime {
+	/// The identifier used to distinguish between accounts.
+	type AccountId = AccountId;
+	/// The index type for storing how many extrinsics an account has signed.
+	type Nonce = Nonce;
+	/// The type for hashing blocks and tries.
+	type Hash = Hash;
+	/// The block type.
+	type Block = Block;
+	/// Maximum number of block number to block hash mappings to keep (oldest pruned first).
+	type BlockHashCount = BlockHashCount;
+	/// Runtime version.
+	type Version = Version;
+	/// The data to be stored in an account.
+	type AccountData = pallet_balances::AccountData<Balance>;
+	/// The weight of database operations that the runtime can invoke.
+	type DbWeight = RocksDbWeight;
+	/// Block & extrinsics weights: base values and limits.
+	type BlockWeights = RuntimeBlockWeights;
+	/// The maximum length of a block (in bytes).
+	type BlockLength = RuntimeBlockLength;
+	/// This is used as an identifier of the chain. 42 is the generic substrate prefix.
+	type SS58Prefix = SS58Prefix;
+	/// The action to take on a runtime upgrade.
+	type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode<Self>;
+	type MaxConsumers = frame_support::traits::ConstU32<16>;
+}
+
+impl pallet_timestamp::Config for Runtime {
+	/// A timestamp: milliseconds since the unix epoch.
+	type Moment = u64;
+	type OnTimestampSet = Aura;
+	type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>;
+	type WeightInfo = ();
+}
+
+impl pallet_authorship::Config for Runtime {
+	type FindAuthor = pallet_session::FindAccountFromAuthorIndex<Self, Aura>;
+	type EventHandler = (CollatorSelection,);
+}
+
+parameter_types! {
+	pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT;
+}
+
+impl pallet_balances::Config for Runtime {
+	type MaxLocks = ConstU32<50>;
+	/// The type for recording an account's balance.
+	type Balance = Balance;
+	/// The ubiquitous event type.
+	type RuntimeEvent = RuntimeEvent;
+	type DustRemoval = ();
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = System;
+	type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
+	type MaxReserves = ConstU32<50>;
+	type ReserveIdentifier = [u8; 8];
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	type FreezeIdentifier = ();
+	type MaxFreezes = ConstU32<0>;
+}
+
+parameter_types! {
+	/// Relay Chain `TransactionByteFee` / 10
+	pub const TransactionByteFee: Balance = 10 * MICROUNIT;
+}
+
+impl pallet_transaction_payment::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter<Balances, ()>;
+	type WeightToFee = WeightToFee;
+	type LengthToFee = ConstantMultiplier<Balance, TransactionByteFee>;
+	type FeeMultiplierUpdate = SlowAdjustingFeeUpdate<Self>;
+	type OperationalFeeMultiplier = ConstU8<5>;
+}
+
+impl pallet_sudo::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type RuntimeCall = RuntimeCall;
+	type WeightInfo = ();
+}
+
+parameter_types! {
+	pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4);
+	pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4);
+	pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent;
+}
+
+impl cumulus_pallet_parachain_system::Config for Runtime {
+	type WeightInfo = ();
+	type RuntimeEvent = RuntimeEvent;
+	type OnSystemEvent = ();
+	type SelfParaId = parachain_info::Pallet<Runtime>;
+	type OutboundXcmpMessageSource = XcmpQueue;
+	type DmpQueue = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
+	type ReservedDmpWeight = ReservedDmpWeight;
+	type XcmpMessageHandler = XcmpQueue;
+	type ReservedXcmpWeight = ReservedXcmpWeight;
+	type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases;
+	type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
+		Runtime,
+		RELAY_CHAIN_SLOT_DURATION_MILLIS,
+		BLOCK_PROCESSING_VELOCITY,
+		UNINCLUDED_SEGMENT_CAPACITY,
+	>;
+}
+
+impl parachain_info::Config for Runtime {}
+
+parameter_types! {
+	pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block;
+}
+
+impl pallet_message_queue::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type WeightInfo = ();
+	#[cfg(feature = "runtime-benchmarks")]
+	type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor<
+		cumulus_primitives_core::AggregateMessageOrigin,
+	>;
+	#[cfg(not(feature = "runtime-benchmarks"))]
+	type MessageProcessor = xcm_builder::ProcessXcmMessage<
+		AggregateMessageOrigin,
+		xcm_executor::XcmExecutor<xcm_config::XcmConfig>,
+		RuntimeCall,
+	>;
+	type Size = u32;
+	// The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin:
+	type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>;
+	type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>;
+	type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
+	type MaxStale = sp_core::ConstU32<8>;
+	type ServiceWeight = MessageQueueServiceWeight;
+	type IdleMaxServiceWeight = ();
+}
+
+impl cumulus_pallet_aura_ext::Config for Runtime {}
+
+impl cumulus_pallet_xcmp_queue::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type ChannelInfo = ParachainSystem;
+	type VersionWrapper = ();
+	// Enqueue XCMP messages from siblings for later processing.
+	type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>;
+	type MaxInboundSuspended = sp_core::ConstU32<1_000>;
+	type ControllerOrigin = EnsureRoot<AccountId>;
+	type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin;
+	type WeightInfo = ();
+	type PriceForSiblingDelivery = NoPriceForMessageDelivery<ParaId>;
+}
+
+parameter_types! {
+	pub const Period: u32 = 6 * HOURS;
+	pub const Offset: u32 = 0;
+}
+
+impl pallet_session::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type ValidatorId = <Self as frame_system::Config>::AccountId;
+	// We don't have stash and controller, thus we don't need the converter as well.
+	type ValidatorIdOf = pallet_collator_selection::IdentityCollator;
+	type ShouldEndSession = pallet_session::PeriodicSessions<Period, Offset>;
+	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	type SessionManager = CollatorSelection;
+	// Essentially just Aura, but let's be pedantic.
+	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
+	type Keys = SessionKeys;
+	type WeightInfo = ();
+}
+
+impl pallet_aura::Config for Runtime {
+	type AuthorityId = AuraId;
+	type DisabledValidators = ();
+	type MaxAuthorities = ConstU32<100_000>;
+	type AllowMultipleBlocksPerSlot = ConstBool<false>;
+	type SlotDuration = pallet_aura::MinimumPeriodTimesTwo<Self>;
+}
+
+parameter_types! {
+	pub const PotId: PalletId = PalletId(*b"PotStake");
+	pub const SessionLength: BlockNumber = 6 * HOURS;
+	// StakingAdmin pluralistic body.
+	pub const StakingAdminBodyId: BodyId = BodyId::Defense;
+}
+
+/// We allow root and the StakingAdmin to execute privileged collator selection operations.
+pub type CollatorSelectionUpdateOrigin = EitherOfDiverse<
+	EnsureRoot<AccountId>,
+	EnsureXcm<IsVoiceOfBody<RelayLocation, StakingAdminBodyId>>,
+>;
+
+impl pallet_collator_selection::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type Currency = Balances;
+	type UpdateOrigin = CollatorSelectionUpdateOrigin;
+	type PotId = PotId;
+	type MaxCandidates = ConstU32<100>;
+	type MinEligibleCollators = ConstU32<4>;
+	type MaxInvulnerables = ConstU32<20>;
+	// Should be a multiple of the session length, or things will get inconsistent.
+	type KickThreshold = Period;
+	type ValidatorId = <Self as frame_system::Config>::AccountId;
+	type ValidatorIdOf = pallet_collator_selection::IdentityCollator;
+	type ValidatorRegistration = Session;
+	type WeightInfo = ();
+}
+
+/// Configure the pallet template in pallets/template.
+impl pallet_parachain_template::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type WeightInfo = pallet_parachain_template::weights::SubstrateWeight<Runtime>;
+}
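Most of `configs/mod.rs` is `parameter_types!` blocks feeding associated types, with `#[derive_impl(...)]` filling in every `frame_system::Config` item the impl does not spell out. What `parameter_types!` expands to is one unit struct per constant implementing `Get`; a self-contained sketch, assuming only `frame-support` as a dependency (`MaxWidgets` is an illustrative name, not template code):

    use frame_support::{parameter_types, traits::Get};

    parameter_types! {
        // Expands to `pub struct MaxWidgets;` plus an `impl Get<u32> for MaxWidgets`.
        pub const MaxWidgets: u32 = 100;
    }

    fn main() {
        // Anywhere a pallet asks for `type SomeLimit: Get<u32>`, `MaxWidgets` now fits.
        assert_eq!(<MaxWidgets as Get<u32>>::get(), 100);
    }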
diff --git a/templates/parachain/runtime/src/xcm_config.rs b/templates/parachain/runtime/src/configs/xcm_config.rs
similarity index 99%
rename from templates/parachain/runtime/src/xcm_config.rs
rename to templates/parachain/runtime/src/configs/xcm_config.rs
index 7dce7164888634bba7a12406cd9d1868d2f26795..13da2363b053467ab02c689dfc14ee67118f543e 100644
--- a/templates/parachain/runtime/src/xcm_config.rs
+++ b/templates/parachain/runtime/src/configs/xcm_config.rs
@@ -1,4 +1,4 @@
-use super::{
+use crate::{
 	AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm,
 	Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue,
 };
diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs
index ad21b79a5b1baf48759e7696c7233a9bdf9bdc38..e2da9309ecc76371f74d19955e91fc02efe6ebde 100644
--- a/templates/parachain/runtime/src/lib.rs
+++ b/templates/parachain/runtime/src/lib.rs
@@ -6,19 +6,15 @@
 #[cfg(feature = "std")]
 include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));

+pub mod apis;
+mod configs;
 mod weights;
-pub mod xcm_config;

-use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases;
-use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery;
 use smallvec::smallvec;
-use sp_api::impl_runtime_apis;
-use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
 use sp_runtime::{
 	create_runtime_str, generic, impl_opaque_keys,
-	traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, Verify},
-	transaction_validity::{TransactionSource, TransactionValidity},
-	ApplyExtrinsicResult, MultiSignature,
+	traits::{BlakeTwo256, IdentifyAccount, Verify},
+	MultiSignature,
 };
 use sp_std::prelude::*;

@@ -26,41 +22,20 @@ use sp_std::prelude::*;
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;

-use cumulus_primitives_core::{AggregateMessageOrigin, ParaId};
 use frame_support::{
-	construct_runtime, derive_impl,
-	dispatch::DispatchClass,
-	genesis_builder_helper::{build_config, create_default_config},
-	parameter_types,
-	traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin},
+	construct_runtime,
 	weights::{
-		constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, Weight, WeightToFeeCoefficient,
+		constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient,
 		WeightToFeeCoefficients, WeightToFeePolynomial,
 	},
-	PalletId,
 };
-use frame_system::{
-	limits::{BlockLength, BlockWeights},
-	EnsureRoot,
-};
-use pallet_xcm::{EnsureXcm, IsVoiceOfBody};
-use parachains_common::message_queue::{NarrowOriginToSibling, ParaIdToSibling};
 pub use sp_consensus_aura::sr25519::AuthorityId as AuraId;
 pub use sp_runtime::{MultiAddress, Perbill, Permill};
-use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin};

 #[cfg(any(feature = "std", test))]
 pub use sp_runtime::BuildStorage;

-// Polkadot imports
-use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate};
-
-use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight};
-
-use pallet_aura::Authorities;
-
-// XCM Imports
-use xcm::latest::prelude::BodyId;
+use weights::ExtrinsicBaseWeight;

 /// Import the template pallet.
 pub use pallet_parachain_template;
@@ -187,7 +162,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	authoring_version: 1,
 	spec_version: 1,
 	impl_version: 0,
-	apis: RUNTIME_API_VERSIONS,
+	apis: apis::RUNTIME_API_VERSIONS,
 	transaction_version: 1,
 	state_version: 1,
 };
@@ -246,253 +221,6 @@ pub fn native_version() -> NativeVersion {
 	NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
 }

-parameter_types! {
-	pub const Version: RuntimeVersion = VERSION;
-
-	// This part is copied from Substrate's `bin/node/runtime/src/lib.rs`.
-	// The `RuntimeBlockLength` and `RuntimeBlockWeights` exist here because the
-	// `DeletionWeightLimit` and `DeletionQueueDepth` depend on those to parameterize
-	// the lazy contract deletion.
-	pub RuntimeBlockLength: BlockLength =
-		BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
-	pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder()
-		.base_block(BlockExecutionWeight::get())
-		.for_class(DispatchClass::all(), |weights| {
-			weights.base_extrinsic = ExtrinsicBaseWeight::get();
-		})
-		.for_class(DispatchClass::Normal, |weights| {
-			weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT);
-		})
-		.for_class(DispatchClass::Operational, |weights| {
-			weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT);
-			// Operational transactions have some extra reserved space, so that they
-			// are included even if block reached `MAXIMUM_BLOCK_WEIGHT`.
-			weights.reserved = Some(
-				MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT
-			);
-		})
-		.avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO)
-		.build_or_panic();
-	pub const SS58Prefix: u16 = 42;
-}
-
-/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from
-/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`),
-/// but overridden as needed.
-#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig)]
-impl frame_system::Config for Runtime {
-	/// The identifier used to distinguish between accounts.
-	type AccountId = AccountId;
-	/// The index type for storing how many extrinsics an account has signed.
-	type Nonce = Nonce;
-	/// The type for hashing blocks and tries.
-	type Hash = Hash;
-	/// The block type.
-	type Block = Block;
-	/// Maximum number of block number to block hash mappings to keep (oldest pruned first).
-	type BlockHashCount = BlockHashCount;
-	/// Runtime version.
-	type Version = Version;
-	/// The data to be stored in an account.
-	type AccountData = pallet_balances::AccountData<Balance>;
-	/// The weight of database operations that the runtime can invoke.
-	type DbWeight = RocksDbWeight;
-	/// Block & extrinsics weights: base values and limits.
-	type BlockWeights = RuntimeBlockWeights;
-	/// The maximum length of a block (in bytes).
-	type BlockLength = RuntimeBlockLength;
-	/// This is used as an identifier of the chain. 42 is the generic substrate prefix.
-	type SS58Prefix = SS58Prefix;
-	/// The action to take on a Runtime Upgrade
-	type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode<Self>;
-	type MaxConsumers = frame_support::traits::ConstU32<16>;
-}
-
-impl pallet_timestamp::Config for Runtime {
-	/// A timestamp: milliseconds since the unix epoch.
-	type Moment = u64;
-	type OnTimestampSet = Aura;
-	type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>;
-	type WeightInfo = ();
-}
-
-impl pallet_authorship::Config for Runtime {
-	type FindAuthor = pallet_session::FindAccountFromAuthorIndex<Self, Aura>;
-	type EventHandler = (CollatorSelection,);
-}
-
-parameter_types! {
-	pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT;
-}
-
-impl pallet_balances::Config for Runtime {
-	type MaxLocks = ConstU32<50>;
-	/// The type for recording an account's balance.
-	type Balance = Balance;
-	/// The ubiquitous event type.
-	type RuntimeEvent = RuntimeEvent;
-	type DustRemoval = ();
-	type ExistentialDeposit = ExistentialDeposit;
-	type AccountStore = System;
-	type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
-	type MaxReserves = ConstU32<50>;
-	type ReserveIdentifier = [u8; 8];
-	type RuntimeHoldReason = RuntimeHoldReason;
-	type RuntimeFreezeReason = RuntimeFreezeReason;
-	type FreezeIdentifier = ();
-	type MaxFreezes = ConstU32<0>;
-}
-
-parameter_types! {
-	/// Relay Chain `TransactionByteFee` / 10
-	pub const TransactionByteFee: Balance = 10 * MICROUNIT;
-}
-
-impl pallet_transaction_payment::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter<Balances, ()>;
-	type WeightToFee = WeightToFee;
-	type LengthToFee = ConstantMultiplier<Balance, TransactionByteFee>;
-	type FeeMultiplierUpdate = SlowAdjustingFeeUpdate<Self>;
-	type OperationalFeeMultiplier = ConstU8<5>;
-}
-
-impl pallet_sudo::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type RuntimeCall = RuntimeCall;
-	type WeightInfo = ();
-}
-
-parameter_types! {
-	pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4);
-	pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4);
-	pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent;
-}
-
-impl cumulus_pallet_parachain_system::Config for Runtime {
-	type WeightInfo = ();
-	type RuntimeEvent = RuntimeEvent;
-	type OnSystemEvent = ();
-	type SelfParaId = parachain_info::Pallet<Runtime>;
-	type OutboundXcmpMessageSource = XcmpQueue;
-	type DmpQueue = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
-	type ReservedDmpWeight = ReservedDmpWeight;
-	type XcmpMessageHandler = XcmpQueue;
-	type ReservedXcmpWeight = ReservedXcmpWeight;
-	type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases;
-	type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
-		Runtime,
-		RELAY_CHAIN_SLOT_DURATION_MILLIS,
-		BLOCK_PROCESSING_VELOCITY,
-		UNINCLUDED_SEGMENT_CAPACITY,
-	>;
-}
-
-impl parachain_info::Config for Runtime {}
-
-parameter_types! {
-	pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block;
-}
-
-impl pallet_message_queue::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type WeightInfo = ();
-	#[cfg(feature = "runtime-benchmarks")]
-	type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor<
-		cumulus_primitives_core::AggregateMessageOrigin,
-	>;
-	#[cfg(not(feature = "runtime-benchmarks"))]
-	type MessageProcessor = xcm_builder::ProcessXcmMessage<
-		AggregateMessageOrigin,
-		xcm_executor::XcmExecutor<xcm_config::XcmConfig>,
-		RuntimeCall,
-	>;
-	type Size = u32;
-	// The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin:
-	type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>;
-	type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>;
-	type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
-	type MaxStale = sp_core::ConstU32<8>;
-	type ServiceWeight = MessageQueueServiceWeight;
-}
-
-impl cumulus_pallet_aura_ext::Config for Runtime {}
-
-impl cumulus_pallet_xcmp_queue::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type ChannelInfo = ParachainSystem;
-	type VersionWrapper = ();
-	// Enqueue XCMP messages from siblings for later processing.
-	type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>;
-	type MaxInboundSuspended = sp_core::ConstU32<1_000>;
-	type ControllerOrigin = EnsureRoot<AccountId>;
-	type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin;
-	type WeightInfo = ();
-	type PriceForSiblingDelivery = NoPriceForMessageDelivery<ParaId>;
-}
-
-parameter_types! {
-	pub const Period: u32 = 6 * HOURS;
-	pub const Offset: u32 = 0;
-}
-
-impl pallet_session::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type ValidatorId = <Self as frame_system::Config>::AccountId;
-	// we don't have stash and controller, thus we don't need the convert as well.
-	type ValidatorIdOf = pallet_collator_selection::IdentityCollator;
-	type ShouldEndSession = pallet_session::PeriodicSessions<Period, Offset>;
-	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
-	type SessionManager = CollatorSelection;
-	// Essentially just Aura, but let's be pedantic.
-	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
-	type Keys = SessionKeys;
-	type WeightInfo = ();
-}
-
-impl pallet_aura::Config for Runtime {
-	type AuthorityId = AuraId;
-	type DisabledValidators = ();
-	type MaxAuthorities = ConstU32<100_000>;
-	type AllowMultipleBlocksPerSlot = ConstBool<false>;
-	type SlotDuration = pallet_aura::MinimumPeriodTimesTwo<Self>;
-}
-
-parameter_types! {
-	pub const PotId: PalletId = PalletId(*b"PotStake");
-	pub const SessionLength: BlockNumber = 6 * HOURS;
-	// StakingAdmin pluralistic body.
-	pub const StakingAdminBodyId: BodyId = BodyId::Defense;
-}
-
-/// We allow root and the StakingAdmin to execute privileged collator selection operations.
-pub type CollatorSelectionUpdateOrigin = EitherOfDiverse<
-	EnsureRoot<AccountId>,
-	EnsureXcm<IsVoiceOfBody<RelayLocation, StakingAdminBodyId>>,
->;
-
-impl pallet_collator_selection::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type Currency = Balances;
-	type UpdateOrigin = CollatorSelectionUpdateOrigin;
-	type PotId = PotId;
-	type MaxCandidates = ConstU32<100>;
-	type MinEligibleCollators = ConstU32<4>;
-	type MaxInvulnerables = ConstU32<20>;
-	// should be a multiple of session or things will get inconsistent
-	type KickThreshold = Period;
-	type ValidatorId = <Self as frame_system::Config>::AccountId;
-	type ValidatorIdOf = pallet_collator_selection::IdentityCollator;
-	type ValidatorRegistration = Session;
-	type WeightInfo = ();
-}
-
-/// Configure the pallet template in pallets/template.
-impl pallet_parachain_template::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type WeightInfo = pallet_parachain_template::weights::SubstrateWeight<Runtime>;
-}
-
 // Create the runtime by composing the FRAME pallets that were previously configured.
 construct_runtime!(
 	pub enum Runtime {
@@ -542,230 +270,6 @@ mod benches {
 	);
 }

-impl_runtime_apis! {
-	impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
-		fn slot_duration() -> sp_consensus_aura::SlotDuration {
-			sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
-		}
-
-		fn authorities() -> Vec<AuraId> {
-			Authorities::<Runtime>::get().into_inner()
-		}
-	}
-
-	impl sp_api::Core<Block> for Runtime {
-		fn version() -> RuntimeVersion {
-			VERSION
-		}
-
-		fn execute_block(block: Block) {
-			Executive::execute_block(block)
-		}
-
-		fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
-			Executive::initialize_block(header)
-		}
-	}
-
-	impl sp_api::Metadata<Block> for Runtime {
-		fn metadata() -> OpaqueMetadata {
-			OpaqueMetadata::new(Runtime::metadata().into())
-		}
-
-		fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
-			Runtime::metadata_at_version(version)
-		}
-
-		fn metadata_versions() -> sp_std::vec::Vec<u32> {
-			Runtime::metadata_versions()
-		}
-	}
-
-	impl sp_block_builder::BlockBuilder<Block> for Runtime {
-		fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
-			Executive::apply_extrinsic(extrinsic)
-		}
-
-		fn finalize_block() -> <Block as BlockT>::Header {
-			Executive::finalize_block()
-		}
-
-		fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
-			data.create_extrinsics()
-		}
-
-		fn check_inherents(
-			block: Block,
-			data: sp_inherents::InherentData,
-		) -> sp_inherents::CheckInherentsResult {
-			data.check_extrinsics(&block)
-		}
-	}
-
-	impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
-		fn validate_transaction(
-			source: TransactionSource,
-			tx: <Block as BlockT>::Extrinsic,
-			block_hash: <Block as BlockT>::Hash,
-		) -> TransactionValidity {
-			Executive::validate_transaction(source, tx, block_hash)
-		}
-	}
-
-	impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
-		fn offchain_worker(header: &<Block as BlockT>::Header) {
-			Executive::offchain_worker(header)
-		}
-	}
-
-	impl sp_session::SessionKeys<Block> for Runtime {
-		fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
-			SessionKeys::generate(seed)
-		}
-
-		fn decode_session_keys(
-			encoded: Vec<u8>,
-		) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
-			SessionKeys::decode_into_raw_public_keys(&encoded)
-		}
-	}
-
-	impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
-		fn account_nonce(account: AccountId) -> Nonce {
-			System::account_nonce(account)
-		}
-	}
-
-	impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
-		fn query_info(
-			uxt: <Block as BlockT>::Extrinsic,
-			len: u32,
-		) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
-			TransactionPayment::query_info(uxt, len)
-		}
-		fn query_fee_details(
-			uxt: <Block as BlockT>::Extrinsic,
-			len: u32,
-		) -> pallet_transaction_payment::FeeDetails<Balance> {
-			TransactionPayment::query_fee_details(uxt, len)
-		}
-		fn query_weight_to_fee(weight: Weight) -> Balance {
-			TransactionPayment::weight_to_fee(weight)
-		}
-		fn query_length_to_fee(length: u32) -> Balance {
-			TransactionPayment::length_to_fee(length)
-		}
-	}
-
-	impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi<Block, Balance, RuntimeCall>
-		for Runtime
-	{
-		fn query_call_info(
-			call: RuntimeCall,
-			len: u32,
-		) -> pallet_transaction_payment::RuntimeDispatchInfo<Balance> {
-			TransactionPayment::query_call_info(call, len)
-		}
-		fn query_call_fee_details(
-			call: RuntimeCall,
-			len: u32,
-		) -> pallet_transaction_payment::FeeDetails<Balance> {
-			TransactionPayment::query_call_fee_details(call, len)
-		}
-		fn query_weight_to_fee(weight: Weight) -> Balance {
-			TransactionPayment::weight_to_fee(weight)
-		}
-		fn query_length_to_fee(length: u32) -> Balance {
-			TransactionPayment::length_to_fee(length)
-		}
-	}
-
-	impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
-		fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo {
-			ParachainSystem::collect_collation_info(header)
-		}
-	}
-
-	#[cfg(feature = "try-runtime")]
-	impl frame_try_runtime::TryRuntime<Block> for Runtime {
-		fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) {
-			let weight = Executive::try_runtime_upgrade(checks).unwrap();
-			(weight, RuntimeBlockWeights::get().max_block)
-		}
-
-		fn execute_block(
-			block: Block,
-			state_root_check: bool,
-			signature_check: bool,
-			select: frame_try_runtime::TryStateSelect,
-		) -> Weight {
-			// NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to
-			// have a backtrace here.
-			Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap()
-		}
-	}
-
-	#[cfg(feature = "runtime-benchmarks")]
-	impl frame_benchmarking::Benchmark<Block> for Runtime {
-		fn benchmark_metadata(extra: bool) -> (
-			Vec<frame_benchmarking::BenchmarkList>,
-			Vec<frame_support::traits::StorageInfo>,
-		) {
-			use frame_benchmarking::{Benchmarking, BenchmarkList};
-			use frame_support::traits::StorageInfoTrait;
-			use frame_system_benchmarking::Pallet as SystemBench;
-			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
-
-			let mut list = Vec::<BenchmarkList>::new();
-			list_benchmarks!(list, extra);
-
-			let storage_info = AllPalletsWithSystem::storage_info();
-			(list, storage_info)
-		}
-
-		fn dispatch_benchmark(
-			config: frame_benchmarking::BenchmarkConfig
-		) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
-			use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch};
-
-			use frame_system_benchmarking::Pallet as SystemBench;
-			impl frame_system_benchmarking::Config for Runtime {
-				fn setup_set_code_requirements(code: &sp_std::vec::Vec<u8>) -> Result<(), BenchmarkError> {
-					ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32);
-					Ok(())
-				}
-
-				fn verify_set_code() {
-					System::assert_last_event(cumulus_pallet_parachain_system::Event::<Runtime>::ValidationFunctionStored.into());
-				}
-			}
-
-			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
-			impl cumulus_pallet_session_benchmarking::Config for Runtime {}
-
-			use frame_support::traits::WhitelistedStorageKeys;
-			let whitelist = AllPalletsWithSystem::whitelisted_storage_keys();
-
-			let mut batches = Vec::<BenchmarkBatch>::new();
-			let params = (&config, &whitelist);
-			add_benchmarks!(params, batches);
-
-			if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
-			Ok(batches)
-		}
-	}
-
-	impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
-		fn create_default_config() -> Vec<u8> {
-			create_default_config::<RuntimeGenesisConfig>()
-		}
-
-		fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result {
-			build_config::<RuntimeGenesisConfig>(config)
-		}
-	}
-}
-
 cumulus_pallet_parachain_system::register_validate_block! {
 	Runtime = Runtime,
 	BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::<Runtime, Executive>,
diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml
index a0bb5c27ed3b56a8656ec55be8c2ad58f87a78d8..1ddd1cb3a7b4efb3da33dad7c80366451f84bd32 100644
--- a/templates/solochain/node/Cargo.toml
+++ b/templates/solochain/node/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
 clap = { version = "4.5.3", features = ["derive"] }
-futures = { version = "0.3.21", features = ["thread-pool"] }
+futures = { version = "0.3.30", features = ["thread-pool"] }
 serde_json = { workspace = true, default-features = true }
 jsonrpsee = { version = "0.22", features = ["server"] }

diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml
index bd2347151989df8950dc4dd96f039c74241d28a5..24519f1d22e0d214e418974de65c7f77588498c4 100644
--- a/templates/solochain/pallets/template/Cargo.toml
+++ b/templates/solochain/pallets/template/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = [
+scale-info = { version = "2.11.1", default-features = false, features = [
 	"derive",
 ] }

diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml
index 33da035878a770f6e65d7d7c0d105fc08a5f4da5..7a81f192043f5bc516ca774e1cebf8c039f06ceb 100644
--- a/templates/solochain/runtime/Cargo.toml
+++ b/templates/solochain/runtime/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
 	"derive",
 ] }
-scale-info = { version = "2.10.0", default-features = false, features = [
+scale-info = { version = "2.11.1", default-features = false, features = [
 	"derive",
 	"serde",
 ] }

@@ -62,7 +62,7 @@ sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool",
 sp-version = { path = "../../../substrate/primitives/version", default-features = false, features = [
 	"serde",
 ] }
-sp-genesis-builder = { default-features = false, path = "../../../substrate/primitives/genesis-builder" }
+sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false }

 # RPC related
 frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false }
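After the split, `lib.rs` keeps only the fee polynomial machinery among the frame-support weight imports (`WeightToFeeCoefficient`, `WeightToFeeCoefficients`, `WeightToFeePolynomial`, plus `smallvec`). For orientation, the usual shape of such a polynomial, sketched along the lines of the template's `WeightToFee` with illustrative coefficient values:

    use frame_support::weights::{
        constants::ExtrinsicBaseWeight, WeightToFeeCoefficient, WeightToFeeCoefficients,
        WeightToFeePolynomial,
    };
    use smallvec::smallvec;
    use sp_runtime::Perbill;

    type Balance = u128;

    pub struct WeightToFee;
    impl WeightToFeePolynomial for WeightToFee {
        type Balance = Balance;
        fn polynomial() -> WeightToFeeCoefficients<Self::Balance> {
            // Map one extrinsic base weight to 1_000_000 balance units; the
            // target value is illustrative, not the template's tuning.
            let p: Balance = 1_000_000;
            let q: Balance = Balance::from(ExtrinsicBaseWeight::get().ref_time());
            smallvec![WeightToFeeCoefficient {
                degree: 1,
                negative: false,
                coeff_frac: Perbill::from_rational(p % q, q),
                coeff_integer: p / q,
            }]
        }
    }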