diff --git a/.config/taplo.toml b/.config/taplo.toml index a45204923cfff1d357da0be5346117acfaf50843..2c6ccfb2b34440686764c39ed6db1c73ed940f06 100644 --- a/.config/taplo.toml +++ b/.config/taplo.toml @@ -7,6 +7,7 @@ exclude = [ "polkadot/node/malus/integrationtests/**", "polkadot/zombienet_tests/**", "substrate/zombienet/**", + "target/**", ] # global rules diff --git a/.github/scripts/check-prdoc.py b/.github/scripts/check-prdoc.py new file mode 100644 index 0000000000000000000000000000000000000000..42b063f2885da148033986dfa49740f2b0416460 --- /dev/null +++ b/.github/scripts/check-prdoc.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 + +''' +Ensure that the prdoc files are valid. + +# Example + +```sh +python3 -m pip install cargo-workspace +python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/*.prdoc +``` + +Produces example output: +```pre +🔎 Reading workspace polkadot-sdk/Cargo.toml +📦 Checking 32 prdocs against 493 crates. +✅ All prdocs are valid +``` +''' + +import os +import yaml +import argparse +import cargo_workspace + +def check_prdoc_crate_names(root, paths): + ''' + Check that all crates in the `crates` section of each prdoc are present in the workspace. + ''' + + print(f'🔎 Reading workspace {root}.') + workspace = cargo_workspace.Workspace.from_path(root) + crate_names = [crate.name for crate in workspace.crates] + + print(f'📦 Checking {len(paths)} prdocs against {len(crate_names)} crates.') + faulty = {} + + for path in paths: + with open(path, 'r') as f: + prdoc = yaml.safe_load(f) + + for crate in prdoc.get('crates', []): + crate = crate['name'] + if crate in crate_names: + continue + + faulty.setdefault(path, []).append(crate) + + if len(faulty) == 0: + print('✅ All prdocs are valid.') + else: + print('❌ Some prdocs are invalid.') + for path, crates in faulty.items(): + print(f'💥 {path} lists invalid crates: {", ".join(crates)}') + exit(1) + +def parse_args(): + parser = argparse.ArgumentParser(description='Check prdoc files') + parser.add_argument('root', help='The cargo workspace manifest', metavar='root', type=str, nargs=1) + parser.add_argument('prdoc', help='The prdoc files', metavar='prdoc', type=str, nargs='*') + args = parser.parse_args() + + if len(args.prdoc) == 0: + print('❌ Need at least one prdoc file as argument.') + exit(1) + + return { 'root': os.path.abspath(args.root[0]), 'prdocs': args.prdoc } + +if __name__ == '__main__': + args = parse_args() + check_prdoc_crate_names(args['root'], args['prdocs']) diff --git a/.github/scripts/check-workspace.py b/.github/scripts/check-workspace.py index d200122fee9f7035dce8c811e7c24c003d9545a4..1f8f103e4e157a8c1c804a618652741193ca5a00 100644 --- a/.github/scripts/check-workspace.py +++ b/.github/scripts/check-workspace.py @@ -18,7 +18,7 @@ def parse_args(): parser.add_argument('workspace_dir', help='The directory to check', metavar='workspace_dir', type=str, nargs=1) parser.add_argument('--exclude', help='Exclude crate paths from the check', metavar='exclude', type=str, nargs='*', default=[]) - + args = parser.parse_args() return (args.workspace_dir[0], args.exclude) @@ -26,7 +26,7 @@ def main(root, exclude): workspace_crates = get_members(root, exclude) all_crates = get_crates(root, exclude) print(f'📦 Found {len(all_crates)} crates in total') - + check_duplicates(workspace_crates) check_missing(workspace_crates, all_crates) check_links(all_crates) @@ -48,14 +48,14 @@ def get_members(workspace_dir, exclude): if not 'members' in root_manifest['workspace']: return [] - + members = [] for member in 
root_manifest['workspace']['members']: if member in exclude: print(f'❌ Excluded member should not appear in the workspace {member}') sys.exit(1) members.append(member) - + return members # List all members of the workspace. @@ -74,12 +74,12 @@ def get_crates(workspace_dir, exclude_crates) -> dict: with open(path, "r") as f: content = f.read() manifest = toml.loads(content) - + if 'workspace' in manifest: if root != workspace_dir: print("⏩ Excluded recursive workspace at %s" % path) continue - + # Cut off the root path and the trailing /Cargo.toml. path = path[len(workspace_dir)+1:-11] name = manifest['package']['name'] @@ -87,7 +87,7 @@ print("⏩ Excluded crate %s at %s" % (name, path)) continue crates[name] = (path, manifest) - + return crates # Check that there are no duplicate entries in the workspace. @@ -138,23 +138,23 @@ def check_links(all_crates): if not 'path' in deps[dep]: broken.append((name, dep_name, "crate must be linked via `path`")) return - + def check_crate(deps): to_checks = ['dependencies', 'dev-dependencies', 'build-dependencies'] for to_check in to_checks: if to_check in deps: check_deps(deps[to_check]) - + # There could possibly target dependant deps: if 'target' in manifest: # Target dependant deps can only have one level of nesting: for _, target in manifest['target'].items(): check_crate(target) - + check_crate(manifest) - + links.sort() broken.sort() diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index bd12d9c6e6ff773f8513189a381d725243e53eb5..29dc269ffd23b1f51e1eb2b87a61544de0cbb57f 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,6 +237,61 @@ fetch_release_artifacts() { popd > /dev/null } +# Fetch the release artifacts like binary and signatures from S3. Assumes the following ENV are set: +# - VERSION +# - REPO in the form paritytech/polkadot +# - BINARY +fetch_release_artifacts_from_s3() { + echo "Version : $VERSION" + echo "Repo : $REPO" + echo "Binary : $BINARY" + OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} + echo "OUTPUT_DIR : $OUTPUT_DIR" + + URL_BASE=$(get_s3_url_base $BINARY) + echo "URL_BASE=$URL_BASE" + + URL_BINARY=$URL_BASE/$VERSION/$BINARY + URL_SHA=$URL_BASE/$VERSION/$BINARY.sha256 + URL_ASC=$URL_BASE/$VERSION/$BINARY.asc + + # Fetch artifacts + mkdir -p "$OUTPUT_DIR" + pushd "$OUTPUT_DIR" > /dev/null + + echo "Fetching artifacts..." 
+ for URL in $URL_BINARY $URL_SHA $URL_ASC; do + echo "Fetching $URL" + curl --progress-bar -LO "$URL" || echo "Missing $URL" + done + + pwd + ls -al --color + popd > /dev/null + +} + +# Pass the name of the binary as input; it will +# return the S3 base url +function get_s3_url_base() { + name=$1 + case $name in + polkadot | polkadot-execute-worker | polkadot-prepare-worker | staking-miner) + printf "https://releases.parity.io/polkadot" + ;; + + polkadot-parachain) + printf "https://releases.parity.io/cumulus" + ;; + + *) + printf "UNSUPPORTED BINARY $name" + exit 1 + ;; + esac +} + + # Check the checksum for a given binary function check_sha256() { echo "Checking SHA256 for $1" @@ -248,13 +303,11 @@ function import_gpg_keys() { GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"} SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798" - WILL="2835EAF92072BC01D188AF2C4A092B93E97CE1E2" EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3" - MARA="533C920F40E73A21EEB7E9EBF27AEA7E7594C9CF" MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF" echo "Importing GPG keys from $GPG_KEYSERVER in parallel" - for key in $SEC $WILL $EGOR $MARA $MORGAN; do + for key in $SEC $EGOR $MORGAN; do ( echo "Importing GPG key $key" gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key @@ -344,3 +397,40 @@ function find_runtimes() { done echo $JSON } + +# Check that the version matches the expected pattern and return it. +# input: version (v1.8.0 or v1.8.0-rc1) +# output: the matched version, or exit 1 if it does not match +filter_version_from_input() { + version=$1 + regex="(^v[0-9]+\.[0-9]+\.[0-9]+)$|(^v[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]+)$" + + if [[ $version =~ $regex ]]; then + if [ -n "${BASH_REMATCH[1]}" ]; then + echo "${BASH_REMATCH[1]}" + elif [ -n "${BASH_REMATCH[2]}" ]; then + echo "${BASH_REMATCH[2]}" + fi + else + echo "Invalid version: $version" + exit 1 + fi + +} + +# Check if the release_id is a valid number +# input: release_id +# output: release_id or exit 1 +check_release_id() { + input=$1 + + release_id=$(echo "$input" | sed 's/[^0-9]//g') + + if [[ $release_id =~ ^[0-9]+$ ]]; then + echo "$release_id" + else + echo "Invalid release_id from input: $input" + exit 1 + fi + +} diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index e1e92d288ceae235d23fa36c31d592092fe8b0ba..c32b6fcf89e06bb56cefc0517e1dcab1d1ef0f37 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -42,5 +42,4 @@ jobs: shopt -s globstar npx @paritytech/license-scanner scan \ --ensure-licenses ${{ env.LICENSES }} \ - --exclude ./substrate/bin/node-template \ -- ./substrate/**/*.rs diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 5503b61d681728b0f1b8adb5af60f9e37b8afee2..c31dee06ec54a0154efc3ad46ff24c79de4d0d7b 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -56,4 +56,12 @@ jobs: run: | echo "Checking for PR#${GITHUB_PR}" echo "You can find more information about PRDoc at $PRDOC_DOC" - $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} + $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check -n ${GITHUB_PR} + + - name: Validate prdoc for PR#${{ github.event.pull_request.number }} + if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} + run: | + echo "Validating PR#${GITHUB_PR}" + python3 --version + python3 -m pip install cargo-workspace==1.2.1 + python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/pr_${GITHUB_PR}.prdoc diff --git 
a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index ecbac01cd3a5b2aaed679cfaf2ade0b04900531a..67e93ee96574de1f1e3e29f1bf6d90085865100d 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -36,7 +36,7 @@ on: -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ jq '.[] | { name: .name, id: .id }' required: true - type: string + type: number registry: description: Container registry @@ -61,7 +61,6 @@ permissions: contents: write env: - RELEASE_ID: ${{ inputs.release_id }} ENGINE: docker REGISTRY: ${{ inputs.registry }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -71,6 +70,7 @@ env: # EVENT_ACTION: ${{ github.event.action }} EVENT_NAME: ${{ github.event_name }} IMAGE_TYPE: ${{ inputs.image_type }} + VERSION: ${{ inputs.version }} jobs: fetch-artifacts: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build @@ -95,13 +95,16 @@ jobs: # chmod a+x $BINARY # ls -al - - name: Fetch rc artifacts or release artifacts based on release id + - name: Fetch rc artifacts or release artifacts from s3 based on version #this step runs only if the workflow is triggered manually if: ${{ env.EVENT_NAME == 'workflow_dispatch' }} run: | . ./.github/scripts/common/lib.sh - fetch_release_artifacts + VERSION=$(filter_version_from_input "${{ inputs.version }}") + echo "VERSION=${VERSION}" >> $GITHUB_ENV + + fetch_release_artifacts_from_s3 - name: Cache the artifacts uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 @@ -147,7 +150,10 @@ jobs: if: ${{ env.IMAGE_TYPE == 'rc' }} id: fetch_rc_refs run: | - release=release-${{ inputs.release_id }} && \ + . ./.github/scripts/common/lib.sh + + RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") + release=release-$RELEASE_ID && \ echo "release=${release}" >> $GITHUB_OUTPUT commit=$(git rev-parse --short HEAD) && \ diff --git a/.github/workflows/release-99_notif-published.yml b/.github/workflows/release-99_notif-published.yml index b35120ca4e128beaa37047b0ac3f21b02f4da663..05c9d6a47f551860c51e318b01b495ca662e902e 100644 --- a/.github/workflows/release-99_notif-published.yml +++ b/.github/workflows/release-99_notif-published.yml @@ -8,22 +8,14 @@ on: jobs: ping_matrix: runs-on: ubuntu-latest + environment: release strategy: matrix: channel: # Internal - - name: 'RelEng: Cumulus Release Coordination' - room: '!NAEMyPAHWOiOQHsvus:parity.io' - pre-releases: true - name: "RelEng: Polkadot Release Coordination" room: '!cqAmzdIcbOFwrdrubV:parity.io' pre-release: true - - name: 'General: Rust, Polkadot, Substrate' - room: '!aJymqQYtCjjqImFLSb:parity.io' - pre-release: false - - name: 'Team: DevOps' - room: '!lUslSijLMgNcEKcAiE:parity.io' - pre-release: true # External - name: 'Ledger <> Polkadot Coordination' @@ -31,18 +23,15 @@ jobs: pre-release: true # Public - # - name: '#KusamaValidatorLounge:polkadot.builders' - # room: '!LhjZccBOqFNYKLdmbb:polkadot.builders' - # pre-releases: false - # - name: '#kusama-announcements:matrix.parity.io' - # room: '!FMwxpQnYhRCNDRsYGI:matrix.parity.io' - # pre-release: false - # - name: '#polkadotvalidatorlounge:web3.foundation' - # room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io' - # pre-release: false - # - name: '#polkadot-announcements:matrix.parity.io' - # room: '!UqHPWiCBGZWxrmYBkF:matrix.parity.io' - # pre-release: false + - name: '#polkadotvalidatorlounge:web3.foundation' + room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io' 
+ pre-releases: false + - name: '#polkadot-announcements:parity.io' + room: '!UqHPWiCBGZWxrmYBkF:matrix.parity.io' + pre-releases: false + - name: '#kusama-announce:parity.io' + room: '!FMwxpQnYhRCNDRsYGI:matrix.parity.io' + pre-releases: false steps: - name: Matrix notification to ${{ matrix.channel.name }} @@ -53,7 +42,9 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - A (pre)release has been ${{github.event.action}} in **${{github.event.repository.full_name}}:**
+ @room + + A new node release has been ${{github.event.action}} in **${{github.event.repository.full_name}}:**
Release version: [${{github.event.release.tag_name}}](${{github.event.release.html_url}}) ----- diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 15b4869997be186c71cecbc89060b7591d71ffa3..f8de6135572565d9d16465e68aa3f0bace915cc5 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -91,7 +91,7 @@ build-rustdoc: - .run-immediately variables: SKIP_WASM_BUILD: 1 - RUSTDOCFLAGS: "" + RUSTDOCFLAGS: "--default-theme=ayu --html-in-header ./docs/sdk/headers/header.html --extend-css ./docs/sdk/headers/theme.css" artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" when: on_success @@ -337,7 +337,7 @@ build-runtimes-polkavm: - .common-refs - .run-immediately script: - - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p minimal-runtime + - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p minimal-template-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p westend-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p rococo-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p polkadot-test-runtime diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 1ed12e68c2ce19b67dd5aca03cec85702351c039..52da33550508ede16c9577346e6985d869b5e8ae 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -108,8 +108,10 @@ check-toml-format: export RUST_LOG=remote-ext=debug,runtime=debug echo "---------- Downloading try-runtime CLI ----------" - curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.0/try-runtime-x86_64-unknown-linux-musl -o try-runtime + curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.4/try-runtime-x86_64-unknown-linux-musl -o try-runtime chmod +x ./try-runtime + echo "Using try-runtime-cli version:" + ./try-runtime --version echo "---------- Building ${PACKAGE} runtime ----------" time cargo build --release --locked -p "$PACKAGE" --features try-runtime @@ -133,6 +135,7 @@ check-runtime-migration-westend: WASM: "westend_runtime.compact.compressed.wasm" URI: "wss://westend-try-runtime-node.parity-chains.parity.io:443" SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" + allow_failure: true check-runtime-migration-rococo: stage: check @@ -256,3 +259,19 @@ find-fail-ci-phrase: echo "No $ASSERT_REGEX was found, exiting with 0"; exit 0; fi + +check-core-crypto-features: + stage: check + extends: + - .docker-env + - .common-refs + script: + - pushd substrate/primitives/core + - ./check-features-variants.sh + - popd + - pushd substrate/primitives/application-crypto + - ./check-features-variants.sh + - popd + - pushd substrate/primitives/keyring + - ./check-features-variants.sh + - popd diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index b5e26d194896aad7839dce34460409fe9ceaa045..5c41a3e6e08fae521c41a66735ac54df51e39057 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -25,6 +25,7 @@ test-linux-stable: # "upgrade_version_checks_should_work" is currently failing - | time cargo nextest run \ + --filter-expr 'not deps(/polkadot-subsystem-bench/)' \ --workspace \ --locked \ --release \ @@ -48,6 +49,7 @@ test-linux-stable: - target/nextest/default/junit.xml reports: junit: target/nextest/default/junit.xml + timeout: 90m test-linux-oldkernel-stable: extends: test-linux-stable @@ -68,7 +70,7 @@ test-linux-stable-runtime-benchmarks: # but still want to have debug assertions. 
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - - time cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + - time cargo nextest run --filter-expr 'not deps(/polkadot-subsystem-bench/)' --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet # can be used to run all tests # test-linux-stable-all: diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 54eb6db48cae63d9cfb08cf7da61125028904371..97572f029d0020f090a8fd16839028ac9f088cf9 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -158,6 +158,14 @@ zombienet-polkadot-functional-0011-async-backing-6-seconds-rate: --local-dir="${LOCAL_DIR}/functional" --test="0011-async-backing-6-seconds-rate.zndsl" +zombienet-polkadot-functional-0012-elastic-scaling-mvp: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0012-elastic-scaling-mvp.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/Cargo.lock b/Cargo.lock index 1fb63b6ecf76f89ba613955a88d4d273bf60b662..7f5f7d6cbcb54a478e077f7d5d22fbc96859734d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,7 +191,7 @@ checksum = "c0391754c09fab4eae3404d19d0d297aa1c670c1775ab51d8a5312afeca23157" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -206,7 +206,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "syn-solidity", "tiny-keccak", ] @@ -322,6 +322,20 @@ dependencies = [ "num-traits", ] +[[package]] +name = "aquamarine" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760" +dependencies = [ + "include_dir", + "itertools 0.10.5", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "aquamarine" version = "0.5.0" @@ -333,7 +347,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -730,12 +744,6 @@ dependencies = [ "nodrop", ] -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - [[package]] name = "arrayvec" version = "0.7.4" @@ -1050,11 +1058,11 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", - "pallet-asset-conversion", "pallet-assets", "pallet-balances", "pallet-collator-selection", "pallet-session", + "pallet-timestamp", "pallet-xcm", "pallet-xcm-bridge-hub-router", "parachains-common", @@ -1218,7 +1226,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1235,7 +1243,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1320,7 +1328,7 @@ dependencies = [ "ark-std 0.4.0", "dleq_vrf", "fflonk", - "merlin 3.0.0", + "merlin", "rand_chacha 0.3.1", "rand_core 0.6.4", "ring 0.1.0", @@ -1417,7 +1425,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1426,9 +1434,7 @@ version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ - "bitcoin_hashes", - "rand", - "rand_core 0.6.4", + "bitcoin_hashes 0.11.0", "serde", "unicode-normalization", ] @@ -1448,12 +1454,28 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bitcoin-internals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" + [[package]] name = "bitcoin_hashes" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" +[[package]] +name = "bitcoin_hashes" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +dependencies = [ + "bitcoin-internals", + "hex-conservative", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -1545,18 +1567,6 @@ dependencies = [ "constant_time_eq 0.3.0", ] -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - [[package]] name = "block-buffer" version = "0.9.0" @@ -1575,15 +1585,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - [[package]] name = "blocking" version = "1.3.1" @@ -2100,6 +2101,7 @@ dependencies = [ "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", + "pallet-timestamp", "pallet-utility", "parachains-common", "parachains-runtimes-test-utils", @@ -2619,9 +2621,9 @@ dependencies = [ [[package]] name = "clap-num" -version = "1.0.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488557e97528174edaa2ee268b23a809e0c598213a4bbcb4f34575a46fda147e" +checksum = "0e063d263364859dc54fb064cedb7c122740cd4733644b14b176c097f51e8ab7" dependencies = [ "num-traits", ] @@ -2670,7 +2672,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -2839,11 +2841,10 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "2.0.4" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" dependencies = [ - "is-terminal", "lazy_static", "windows-sys 0.48.0", ] @@ -2871,7 +2872,7 @@ dependencies = [ "ark-std 0.4.0", "fflonk", "getrandom_or_panic", - "merlin 3.0.0", + "merlin", "rand_chacha 0.3.1", ] @@ -3511,16 +3512,6 @@ dependencies = [ "subtle 2.5.0", ] -[[package]] -name = "crypto-mac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" -dependencies = [ - "generic-array 0.14.7", - 
"subtle 2.5.0", -] - [[package]] name = "ctr" version = "0.7.0" @@ -3873,6 +3864,7 @@ dependencies = [ "pallet-message-queue", "parity-scale-codec", "polkadot-parachain-primitives", + "polkadot-runtime-common", "polkadot-runtime-parachains", "rand", "sc-client-api", @@ -3901,7 +3893,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4046,6 +4038,25 @@ dependencies = [ "sp-trie", ] +[[package]] +name = "cumulus-primitives-storage-weight-reclaim" +version = "1.0.0" +dependencies = [ + "cumulus-primitives-core", + "cumulus-primitives-proof-size-hostfunction", + "cumulus-test-runtime", + "docify 0.2.7", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-trie", +] + [[package]] name = "cumulus-primitives-timestamp" version = "0.7.0" @@ -4066,7 +4077,6 @@ dependencies = [ "frame-support", "log", "pallet-asset-conversion", - "pallet-xcm-benchmarks", "parity-scale-codec", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -4146,6 +4156,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-primitives", + "polkadot-service", "sc-authority-discovery", "sc-client-api", "sc-network", @@ -4208,6 +4219,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-storage-weight-reclaim", "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", @@ -4252,6 +4264,7 @@ version = "0.1.0" dependencies = [ "cumulus-pallet-parachain-system", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "frame-executive", "frame-support", "frame-system", @@ -4294,6 +4307,7 @@ dependencies = [ "cumulus-client-service", "cumulus-pallet-parachain-system", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", @@ -4356,19 +4370,6 @@ dependencies = [ "url", ] -[[package]] -name = "curve25519-dalek" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle 2.5.0", - "zeroize", -] - [[package]] name = "curve25519-dalek" version = "3.2.0" @@ -4407,7 +4408,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4447,7 +4448,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4464,7 +4465,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4672,7 +4673,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4712,13 +4713,39 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "docify" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af1b04e6ef3d21119d3eb7b032bca17f99fe041e9c072f30f32cc0e1a2b1f3c4" 
+dependencies = [ + "docify_macros 0.1.16", +] + [[package]] name = "docify" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7cc4fd38aaa9fb98ac70794c82a00360d1e165a87fbf96a8a91f9dfc602aaee2" dependencies = [ - "docify_macros", + "docify_macros 0.2.7", +] + +[[package]] +name = "docify_macros" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b5610df7f2acf89a1bb5d1a66ae56b1c7fcdcfe3948856fb3ace3f644d70eb7" +dependencies = [ + "common-path", + "derive-syn-parse", + "lazy_static", + "proc-macro2", + "quote", + "regex", + "syn 2.0.50", + "termcolor", + "walkdir", ] [[package]] @@ -4733,7 +4760,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.49", + "syn 2.0.50", "termcolor", "toml 0.8.8", "walkdir", @@ -4800,6 +4827,7 @@ dependencies = [ "digest 0.10.7", "elliptic-curve", "rfc6979", + "serdect", "signature", "spki", ] @@ -4866,9 +4894,9 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" -version = "0.13.5" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct", "crypto-bigint", @@ -4879,6 +4907,7 @@ dependencies = [ "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle 2.5.0", "zeroize", ] @@ -4958,7 +4987,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4969,7 +4998,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5159,7 +5188,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5172,12 +5201,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -5287,7 +5310,7 @@ dependencies = [ "ark-poly", "ark-serialize 0.4.2", "ark-std 0.4.0", - "merlin 3.0.0", + "merlin", ] [[package]] @@ -5427,7 +5450,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" name = "frame" version = "0.0.1-dev" dependencies = [ - "docify", + "docify 0.2.7", "frame-executive", "frame-support", "frame-system", @@ -5552,7 +5575,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.49", + "syn 2.0.50", "trybuild", ] @@ -5595,6 +5618,7 @@ dependencies = [ name = "frame-executive" version = "28.0.0" dependencies = [ + "aquamarine 0.3.3", "array-bytes 6.1.0", "frame-support", "frame-system", @@ -5651,11 +5675,11 @@ dependencies = [ name = "frame-support" version = "28.0.0" dependencies = [ - "aquamarine", + "aquamarine 0.5.0", "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "docify", + "docify 0.2.7", "environmental", "frame-metadata", "frame-support-procedural", @@ -5685,6 +5709,7 @@ dependencies = [ "sp-staking", "sp-state-machine", "sp-std 14.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "sp-weights", "static_assertions", @@ -5707,7 +5732,7 @@ dependencies = [ "quote", "regex", "sp-crypto-hashing", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] 
@@ -5718,7 +5743,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5727,7 +5752,7 @@ version = "11.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5798,7 +5823,7 @@ version = "28.0.0" dependencies = [ "cfg-if", "criterion 0.4.0", - "docify", + "docify 0.2.7", "frame-support", "log", "parity-scale-codec", @@ -5960,7 +5985,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -6243,9 +6268,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.3.7" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" +checksum = "ab283476b99e66691dee3f1640fea91487a8d81f50fb5ecc75538f8f8879a1e4" dependencies = [ "log", "pest", @@ -6335,6 +6360,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-conservative" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" + [[package]] name = "hex-literal" version = "0.4.1" @@ -6360,16 +6391,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.0", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -6970,14 +6991,15 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if", "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2 0.10.7", ] @@ -7057,6 +7079,7 @@ dependencies = [ "pallet-lottery", "pallet-membership", "pallet-message-queue", + "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", @@ -7901,7 +7924,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7915,7 +7938,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7926,7 +7949,7 @@ checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7937,7 +7960,7 @@ checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -8055,18 +8078,6 @@ dependencies = [ "hash-db", ] -[[package]] -name = "merlin" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - [[package]] name = "merlin" version = "3.0.0" @@ -8103,15 +8114,15 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] -name = "minimal-node" -version = "4.0.0-dev" +name = "minimal-template-node" +version = "0.0.0" dependencies = [ "clap 4.5.1", "frame", "futures", "futures-timer", "jsonrpsee", - "minimal-runtime", + "minimal-template-runtime", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -8138,12 +8149,12 @@ dependencies = [ ] [[package]] -name = "minimal-runtime" -version = "0.1.0" +name = "minimal-template-runtime" +version = "0.0.0" dependencies = [ "frame", - "frame-support", "pallet-balances", + "pallet-minimal-template", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -8165,9 +8176,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -8657,50 +8668,6 @@ dependencies = [ "kitchensink-runtime", ] -[[package]] -name = "node-template" -version = "4.0.0-dev" -dependencies = [ - "clap 4.5.1", - "frame-benchmarking", - "frame-benchmarking-cli", - "frame-system", - "futures", - "jsonrpsee", - "node-template-runtime", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc", - "sc-basic-authorship", - "sc-cli", - "sc-client-api", - "sc-consensus", - "sc-consensus-aura", - "sc-consensus-grandpa", - "sc-executor", - "sc-network", - "sc-offchain", - "sc-rpc-api", - "sc-service", - "sc-telemetry", - "sc-transaction-pool", - "sc-transaction-pool-api", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-consensus-aura", - "sp-consensus-grandpa", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-runtime", - "sp-timestamp", - "substrate-build-script-utils", - "substrate-frame-rpc-system", - "try-runtime-cli", -] - [[package]] name = "node-template-release" version = "3.0.0" @@ -8715,45 +8682,6 @@ dependencies = [ "toml_edit 0.19.15", ] -[[package]] -name = "node-template-runtime" -version = "4.0.0-dev" -dependencies = [ - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-aura", - "pallet-balances", - "pallet-grandpa", - "pallet-sudo", - "pallet-template", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", - "scale-info", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-grandpa", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 14.0.0", - "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "substrate-wasm-builder", -] - [[package]] name = "node-testing" version = "3.0.0-dev" @@ -9277,8 +9205,8 @@ dependencies = [ name = "pallet-bags-list" version = "27.0.0" dependencies = [ - "aquamarine", - "docify", + "aquamarine 0.5.0", + "docify 0.2.7", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -9326,7 +9254,7 @@ dependencies = [ name = "pallet-balances" version = "28.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -9563,7 +9491,7 @@ dependencies = [ name = "pallet-collective" version = 
"28.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -9649,7 +9577,6 @@ dependencies = [ "tempfile", "toml 0.8.8", "twox-hash", - "wat", ] [[package]] @@ -9696,7 +9623,7 @@ version = "18.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -9943,6 +9870,26 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-example-single-block-migrations" +version = "0.0.1" +dependencies = [ + "docify 0.2.7", + "frame-executive", + "frame-support", + "frame-system", + "frame-try-runtime", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-version", +] + [[package]] name = "pallet-example-split" version = "10.0.0" @@ -9984,6 +9931,7 @@ dependencies = [ "pallet-example-frame-crate", "pallet-example-kitchensink", "pallet-example-offchain-worker", + "pallet-example-single-block-migrations", "pallet-example-split", "pallet-example-tasks", ] @@ -9992,7 +9940,7 @@ dependencies = [ name = "pallet-fast-unstake" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -10189,6 +10137,39 @@ dependencies = [ "sp-weights", ] +[[package]] +name = "pallet-migrations" +version = "1.0.0" +dependencies = [ + "docify 0.1.16", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "pretty_assertions", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-tracing 16.0.0", + "sp-version", +] + +[[package]] +name = "pallet-minimal-template" +version = "0.0.0" +dependencies = [ + "frame", + "parity-scale-codec", + "scale-info", +] + [[package]] name = "pallet-mixnet" version = "0.4.0" @@ -10464,7 +10445,7 @@ dependencies = [ name = "pallet-paged-list" version = "0.6.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10490,14 +10471,13 @@ dependencies = [ [[package]] name = "pallet-parachain-template" -version = "0.7.0" +version = "0.0.0" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "parity-scale-codec", "scale-info", - "serde", "sp-core", "sp-io", "sp-runtime", @@ -10507,7 +10487,7 @@ dependencies = [ name = "pallet-parameters" version = "0.0.1" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10668,7 +10648,7 @@ dependencies = [ name = "pallet-safe-mode" version = "9.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10725,7 +10705,7 @@ dependencies = [ name = "pallet-scheduler" version = "29.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10871,7 +10851,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -10938,7 +10918,7 @@ dependencies = [ name = "pallet-sudo" version = "28.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10952,7 +10932,7 @@ dependencies = [ [[package]] name = "pallet-template" -version = "4.0.0-dev" +version = "0.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -10962,14 +10942,13 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] name = 
"pallet-timestamp" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11073,7 +11052,7 @@ dependencies = [ name = "pallet-treasury" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11093,7 +11072,7 @@ dependencies = [ name = "pallet-tx-pause" version = "9.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11275,7 +11254,7 @@ dependencies = [ [[package]] name = "parachain-template-node" -version = "0.1.0" +version = "0.0.0" dependencies = [ "clap 4.5.1", "color-print", @@ -11333,7 +11312,7 @@ dependencies = [ [[package]] name = "parachain-template-runtime" -version = "0.7.0" +version = "0.0.0" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", @@ -11341,6 +11320,7 @@ dependencies = [ "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", @@ -11433,6 +11413,7 @@ dependencies = [ "pallet-balances", "pallet-collator-selection", "pallet-session", + "pallet-timestamp", "pallet-xcm", "parity-scale-codec", "polkadot-parachain-primitives", @@ -11448,6 +11429,19 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "parity-bip39" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" +dependencies = [ + "bitcoin_hashes 0.13.0", + "rand", + "rand_core 0.6.4", + "serde", + "unicode-normalization", +] + [[package]] name = "parity-bytes" version = "0.1.2" @@ -11603,19 +11597,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7924d1d0ad836f665c9065e26d016c673ece3993f30d340068b16f282afc1156" [[package]] -name = "paste" -version = "1.0.14" +name = "password-hash" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle 2.5.0", +] [[package]] -name = "pbkdf2" -version = "0.8.0" +name = "paste" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" -dependencies = [ - "crypto-mac 0.11.0", -] +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" @@ -11624,6 +11620,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest 0.10.7", + "password-hash", ] [[package]] @@ -11714,7 +11711,6 @@ dependencies = [ "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", - "testnet-parachains-constants", ] [[package]] @@ -11951,7 +11947,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -11992,7 +11988,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -12158,6 +12154,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + 
"polkadot-subsystem-bench", "rand", "sc-network", "schnellru", @@ -12189,6 +12186,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand", "sc-network", "schnellru", @@ -12414,7 +12412,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "log", - "merlin 3.0.0", + "merlin", "parity-scale-codec", "parking_lot 0.12.1", "polkadot-node-jaeger", @@ -12489,7 +12487,9 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-statement-table", + "rstest", "sc-keystore", + "schnellru", "sp-application-crypto", "sp-core", "sp-keyring", @@ -12641,6 +12641,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -12664,6 +12665,8 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", + "schnellru", "sp-application-crypto", "sp-keystore", "thiserror", @@ -13159,6 +13162,7 @@ version = "7.0.0" dependencies = [ "bitvec", "hex-literal", + "log", "parity-scale-codec", "polkadot-core-primitives", "polkadot-parachain-primitives", @@ -13251,7 +13255,6 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "pallet-xcm-benchmarks", "parity-scale-codec", "polkadot-primitives", "polkadot-primitives-test-helpers", @@ -13325,6 +13328,7 @@ dependencies = [ "polkadot-runtime-metrics", "rand", "rand_chacha 0.3.1", + "rstest", "rustc-hex", "sc-keystore", "scale-info", @@ -13356,13 +13360,28 @@ version = "0.0.1" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", - "docify", + "docify 0.2.7", "frame", + "frame-executive", + "frame-support", + "frame-system", "kitchensink-runtime", + "pallet-assets", "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collective", "pallet-default-config-example", + "pallet-democracy", + "pallet-example-offchain-worker", + "pallet-example-single-block-migrations", "pallet-examples", + "pallet-multisig", + "pallet-proxy", + "pallet-scheduler", "pallet-timestamp", + "pallet-transaction-payment", + "pallet-utility", "parity-scale-codec", "sc-cli", "sc-client-db", @@ -13381,7 +13400,9 @@ dependencies = [ "sp-core", "sp-io", "sp-keyring", + "sp-offchain", "sp-runtime", + "sp-version", "staging-chain-spec-builder", "staging-node-cli", "staging-parachain-info", @@ -13396,6 +13417,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", + "bitvec", "env_logger 0.9.3", "frame-benchmarking", "frame-benchmarking-cli", @@ -13556,6 +13578,7 @@ dependencies = [ "parity-scale-codec", "polkadot-primitives", "sp-core", + "tracing-gum", ] [[package]] @@ -13608,7 +13631,7 @@ dependencies = [ "sc-keystore", "sc-network", "sc-service", - "schnorrkel 0.9.1", + "schnorrkel 0.11.4", "serde", "serde_yaml", "sha1", @@ -13813,6 +13836,28 @@ dependencies = [ "westend-runtime", ] +[[package]] +name = "polkavm" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a3693e5efdb2bf74e449cd25fd777a28bd7ed87e41f5d5da75eb31b4de48b94" +dependencies = [ + "libc", + "log", + "polkavm-assembler", + "polkavm-common 0.9.0", + "polkavm-linux-raw", +] + +[[package]] +name = "polkavm-assembler" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa96d6d868243acc12de813dd48e756cbadcc8e13964c70d272753266deadc1" +dependencies = [ + "log", +] + 
[[package]] name = "polkavm-common" version = "0.5.0" @@ -13821,9 +13866,12 @@ checksum = "88b4e215c80fe876147f3d58158d5dfeae7dabdd6047e175af77095b78d0035c" [[package]] name = "polkavm-common" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c99f7eee94e7be43ba37eef65ad0ee8cbaf89b7c00001c3f6d2be985cb1817" +checksum = "1d9428a5cfcc85c5d7b9fc4b6a18c4b802d0173d768182a51cc7751640f08b92" +dependencies = [ + "log", +] [[package]] name = "polkavm-derive" @@ -13832,14 +13880,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6380dbe1fb03ecc74ad55d841cfc75480222d153ba69ddcb00977866cbdabdb8" dependencies = [ "polkavm-derive-impl 0.5.0", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] name = "polkavm-derive" -version = "0.8.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79fa916f7962348bd1bb1a65a83401675e6fc86c51a0fdbcf92a3108e58e6125" +checksum = "ae8c4bea6f3e11cd89bb18bcdddac10bd9a24015399bd1c485ad68a985a19606" dependencies = [ "polkavm-derive-impl-macro", ] @@ -13853,29 +13901,29 @@ dependencies = [ "polkavm-common 0.5.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] name = "polkavm-derive-impl" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c10b2654a8a10a83c260bfb93e97b262cf0017494ab94a65d389e0eda6de6c9c" +checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ - "polkavm-common 0.8.0", + "polkavm-common 0.9.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] name = "polkavm-derive-impl-macro" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e85319a0d5129dc9f021c62607e0804f5fb777a05cdda44d750ac0732def66" +checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ - "polkavm-derive-impl 0.8.0", - "syn 2.0.49", + "polkavm-derive-impl 0.9.0", + "syn 2.0.50", ] [[package]] @@ -13895,19 +13943,25 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.8.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdec1451cb18261d5d01de82acc15305e417fb59588cdcb3127d3dcc9672b925" +checksum = "9c7be503e60cf56c0eb785f90aaba4b583b36bff00e93997d93fef97f9553c39" dependencies = [ "gimli 0.28.0", "hashbrown 0.14.3", "log", "object 0.32.2", - "polkavm-common 0.8.0", + "polkavm-common 0.9.0", "regalloc2 0.9.3", "rustc-demangle", ] +[[package]] +name = "polkavm-linux-raw" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26e85d3456948e650dff0cfc85603915847faf893ed1e66b020bb82ef4557120" + [[package]] name = "polling" version = "2.8.0" @@ -14080,7 +14134,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14171,7 +14225,7 @@ checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14243,7 +14297,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14343,7 +14397,7 @@ dependencies = [ "itertools 0.11.0", 
"proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14686,7 +14740,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14764,6 +14818,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +[[package]] +name = "relative-path" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e898588f33fdd5b9420719948f9f2a32c922a246964576f71ba7f24f80610fbc" + [[package]] name = "remote-ext-tests-bags-list" version = "1.0.0" @@ -14851,7 +14911,7 @@ dependencies = [ "blake2 0.10.6", "common", "fflonk", - "merlin 3.0.0", + "merlin", ] [[package]] @@ -15146,6 +15206,35 @@ dependencies = [ "winapi", ] +[[package]] +name = "rstest" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.0", +] + +[[package]] +name = "rstest_macros" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +dependencies = [ + "cfg-if", + "glob", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version 0.4.0", + "syn 2.0.50", + "unicode-ident", +] + [[package]] name = "rtnetlink" version = "0.10.1" @@ -15565,7 +15654,7 @@ name = "sc-chain-spec" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "docify", + "docify 0.2.7", "log", "memmap2 0.9.3", "parity-scale-codec", @@ -15596,7 +15685,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -15604,7 +15693,6 @@ name = "sc-cli" version = "0.36.0" dependencies = [ "array-bytes 6.1.0", - "bip39", "chrono", "clap 4.5.1", "fdlimit", @@ -15614,6 +15702,7 @@ dependencies = [ "libp2p-identity", "log", "names", + "parity-bip39", "parity-scale-codec", "rand", "regex", @@ -16085,6 +16174,7 @@ dependencies = [ "paste", "regex", "sc-executor-common", + "sc-executor-polkavm", "sc-executor-wasmtime", "sc-runtime-test", "sc-tracing", @@ -16114,6 +16204,7 @@ dependencies = [ name = "sc-executor-common" version = "0.29.0" dependencies = [ + "polkavm", "sc-allocator", "sp-maybe-compressed-blob", "sp-wasm-interface 20.0.0", @@ -16121,6 +16212,16 @@ dependencies = [ "wasm-instrument", ] +[[package]] +name = "sc-executor-polkavm" +version = "0.29.0" +dependencies = [ + "log", + "polkavm", + "sc-executor-common", + "sp-wasm-interface 20.0.0", +] + [[package]] name = "sc-executor-wasmtime" version = "0.29.0" @@ -16568,7 +16669,6 @@ dependencies = [ "hyper", "jsonrpsee", "log", - "pin-project", "serde_json", "substrate-prometheus-endpoint", "tokio", @@ -16667,6 +16767,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", + "schnellru", "serde", "serde_json", "sp-api", @@ -16866,7 +16967,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -17004,22 +17105,6 @@ dependencies = [ "hashbrown 0.13.2", ] -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - 
"arrayref", - "arrayvec 0.5.2", - "curve25519-dalek 2.1.3", - "merlin 2.0.1", - "rand_core 0.5.1", - "sha2 0.8.2", - "subtle 2.5.0", - "zeroize", -] - [[package]] name = "schnorrkel" version = "0.10.2" @@ -17029,7 +17114,7 @@ dependencies = [ "arrayref", "arrayvec 0.7.4", "curve25519-dalek-ng", - "merlin 3.0.0", + "merlin", "rand_core 0.6.4", "sha2 0.9.9", "subtle-ng", @@ -17047,7 +17132,7 @@ dependencies = [ "arrayvec 0.7.4", "curve25519-dalek 4.1.2", "getrandom_or_panic", - "merlin 3.0.0", + "merlin", "rand_core 0.6.4", "serde_bytes", "sha2 0.10.7", @@ -17093,6 +17178,7 @@ dependencies = [ "der", "generic-array 0.14.7", "pkcs8", + "serdect", "subtle 2.5.0", "zeroize", ] @@ -17250,9 +17336,9 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -17277,13 +17363,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -17308,9 +17394,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -17340,9 +17426,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.31" +version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ "indexmap 2.2.3", "itoa", @@ -17351,6 +17437,16 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "serial_test" version = "2.0.0" @@ -17373,7 +17469,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -17411,18 +17507,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha2" version = "0.9.9" @@ -17657,13 +17741,13 @@ dependencies = [ "hmac 0.12.1", "itertools 0.11.0", "libsecp256k1", - "merlin 3.0.0", + "merlin", "no-std-net", "nom", "num-bigint", "num-rational", "num-traits", - "pbkdf2 0.12.2", + "pbkdf2", "pin-project", "poly1305 0.8.0", "rand", @@ -17755,7 +17839,7 @@ dependencies = [ [[package]] name = 
"snowbridge-beacon-primitives" -version = "0.0.0" +version = "0.2.0" dependencies = [ "byte-slice-cast", "frame-support", @@ -17779,7 +17863,7 @@ dependencies = [ [[package]] name = "snowbridge-core" -version = "0.0.0" +version = "0.2.0" dependencies = [ "ethabi-decode", "frame-support", @@ -17802,7 +17886,7 @@ dependencies = [ [[package]] name = "snowbridge-ethereum" -version = "0.1.0" +version = "0.3.0" dependencies = [ "ethabi-decode", "ethbloom", @@ -17841,7 +17925,7 @@ dependencies = [ [[package]] name = "snowbridge-outbound-queue-merkle-tree" -version = "0.1.1" +version = "0.3.0" dependencies = [ "array-bytes 4.2.0", "env_logger 0.9.3", @@ -17856,7 +17940,7 @@ dependencies = [ [[package]] name = "snowbridge-outbound-queue-runtime-api" -version = "0.0.0" +version = "0.2.0" dependencies = [ "frame-support", "parity-scale-codec", @@ -17870,7 +17954,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-ethereum-client" -version = "0.0.0" +version = "0.2.0" dependencies = [ "bp-runtime", "byte-slice-cast", @@ -17916,7 +18000,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-inbound-queue" -version = "0.0.0" +version = "0.2.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -17949,7 +18033,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-inbound-queue-fixtures" -version = "0.9.0" +version = "0.10.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -17963,7 +18047,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-outbound-queue" -version = "0.0.0" +version = "0.2.0" dependencies = [ "bridge-hub-common", "ethabi-decode", @@ -17988,7 +18072,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-system" -version = "0.0.0" +version = "0.2.0" dependencies = [ "ethabi-decode", "frame-benchmarking", @@ -18016,7 +18100,7 @@ dependencies = [ [[package]] name = "snowbridge-router-primitives" -version = "0.0.0" +version = "0.9.0" dependencies = [ "ethabi-decode", "frame-support", @@ -18039,7 +18123,7 @@ dependencies = [ [[package]] name = "snowbridge-runtime-common" -version = "0.0.0" +version = "0.2.0" dependencies = [ "frame-support", "frame-system", @@ -18055,7 +18139,7 @@ dependencies = [ [[package]] name = "snowbridge-runtime-test-common" -version = "0.0.0" +version = "0.2.0" dependencies = [ "assets-common", "bridge-hub-test-utils", @@ -18132,7 +18216,7 @@ dependencies = [ [[package]] name = "snowbridge-system-runtime-api" -version = "0.0.0" +version = "0.2.0" dependencies = [ "parity-scale-codec", "snowbridge-core", @@ -18179,6 +18263,87 @@ dependencies = [ "sha-1 0.9.8", ] +[[package]] +name = "solochain-template-node" +version = "0.0.0" +dependencies = [ + "clap 4.5.1", + "frame-benchmarking-cli", + "frame-system", + "futures", + "jsonrpsee", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-grandpa", + "sc-executor", + "sc-network", + "sc-offchain", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde_json", + "solochain-template-runtime", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", + "try-runtime-cli", +] + +[[package]] +name = "solochain-template-runtime" +version = "0.0.0" +dependencies = [ + "frame-benchmarking", + 
"frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-balances", + "pallet-grandpa", + "pallet-sudo", + "pallet-template", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 14.0.0", + "sp-storage 19.0.0", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "sp-api" version = "26.0.0" @@ -18212,7 +18377,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18477,7 +18642,6 @@ version = "28.0.0" dependencies = [ "array-bytes 6.1.0", "bandersnatch_vrfs", - "bip39", "bitflags 1.3.2", "blake2 0.10.6", "bounded-collections", @@ -18490,10 +18654,12 @@ dependencies = [ "hash256-std-hasher", "impl-serde", "itertools 0.10.5", + "k256", "lazy_static", "libsecp256k1", "log", - "merlin 3.0.0", + "merlin", + "parity-bip39", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -18605,7 +18771,7 @@ version = "0.0.0" dependencies = [ "quote", "sp-crypto-hashing", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18623,7 +18789,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18632,7 +18798,7 @@ version = "14.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18689,6 +18855,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", + "polkavm-derive 0.9.1", "rustversion", "secp256k1", "sp-core", @@ -18830,7 +18997,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ - "docify", + "docify 0.2.7", "either", "hash256-std-hasher", "impl-trait-for-tuples", @@ -18880,7 +19047,7 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.8.0", + "polkavm-derive 0.9.1", "primitive-types", "rustversion", "sp-core", @@ -18906,7 +19073,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18918,7 +19085,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -19190,7 +19357,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -19635,7 +19802,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -19648,14 +19815,14 @@ dependencies = [ [[package]] name = "substrate-bip39" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e620c7098893ba667438b47169c00aacdd9e7c10e042250ce2b60b087ec97328" +version = "0.4.7" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.8.0", - "schnorrkel 0.9.1", - "sha2 0.9.9", + "bip39", + "hmac 0.12.1", + "pbkdf2", + "rustc-hex", + "schnorrkel 0.11.4", + "sha2 0.10.7", "zeroize", ] @@ -19902,7 +20069,7 @@ dependencies = [ "console", "filetime", "parity-wasm", - "polkavm-linker 0.8.2", + "polkavm-linker 0.9.2", "sp-maybe-compressed-blob", "strum 0.24.1", "tempfile", @@ -20033,9 +20200,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.49" +version = "2.0.50" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" +checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" dependencies = [ "proc-macro2", "quote", @@ -20051,7 +20218,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20316,7 +20483,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20477,7 +20644,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20684,7 +20851,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20726,7 +20893,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -21291,7 +21458,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "wasm-bindgen-shared", ] @@ -21325,7 +21492,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -22320,10 +22487,12 @@ dependencies = [ "pallet-transaction-payment", "pallet-xcm", "parity-scale-codec", + "polkadot-service", "polkadot-test-client", "polkadot-test-runtime", "polkadot-test-service", "sp-consensus", + "sp-core", "sp-keyring", "sp-runtime", "sp-state-machine", @@ -22340,7 +22509,7 @@ dependencies = [ "proc-macro2", "quote", "staging-xcm", - "syn 2.0.49", + "syn 2.0.50", "trybuild", ] @@ -22462,14 +22631,14 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -22482,7 +22651,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d09f19f280cbb0b81ac634249912357919209fbf..f256d02808a8f4475eeb4d0c7ac35d305cfe05ab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ authors = ["Parity Technologies "] edition = "2021" repository = "https://github.com/paritytech/polkadot-sdk.git" license = "GPL-3.0-only" +homepage = "https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html" [workspace] resolver = "2" @@ -74,9 +75,6 @@ members = [ "cumulus/pallets/solo-to-para", "cumulus/pallets/xcm", "cumulus/pallets/xcmp-queue", - "cumulus/parachain-template/node", - "cumulus/parachain-template/pallets/template", - "cumulus/parachain-template/runtime", "cumulus/parachains/common", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", @@ -127,6 +125,7 @@ members = [ "cumulus/primitives/core", "cumulus/primitives/parachain-inherent", 
"cumulus/primitives/proof-size-hostfunction", + "cumulus/primitives/storage-weight-reclaim", "cumulus/primitives/timestamp", "cumulus/primitives/utility", "cumulus/test/client", @@ -218,11 +217,6 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", - "substrate/bin/minimal/node", - "substrate/bin/minimal/runtime", - "substrate/bin/node-template/node", - "substrate/bin/node-template/pallets/template", - "substrate/bin/node-template/runtime", "substrate/bin/node/bench", "substrate/bin/node/cli", "substrate/bin/node/inspect", @@ -255,6 +249,7 @@ members = [ "substrate/client/db", "substrate/client/executor", "substrate/client/executor/common", + "substrate/client/executor/polkavm", "substrate/client/executor/runtime-test", "substrate/client/executor/wasmtime", "substrate/client/informant", @@ -336,6 +331,7 @@ members = [ "substrate/frame/examples/frame-crate", "substrate/frame/examples/kitchensink", "substrate/frame/examples/offchain-worker", + "substrate/frame/examples/single-block-migrations", "substrate/frame/examples/split", "substrate/frame/examples/tasks", "substrate/frame/executive", @@ -350,6 +346,7 @@ members = [ "substrate/frame/membership", "substrate/frame/merkle-mountain-range", "substrate/frame/message-queue", + "substrate/frame/migrations", "substrate/frame/mixnet", "substrate/frame/multisig", "substrate/frame/nft-fractionalization", @@ -500,7 +497,20 @@ members = [ "substrate/utils/frame/rpc/system", "substrate/utils/frame/try-runtime/cli", "substrate/utils/prometheus", + "substrate/utils/substrate-bip39", "substrate/utils/wasm-builder", + + "templates/minimal/node", + "templates/minimal/pallets/template", + "templates/minimal/runtime", + + "templates/solochain/node", + "templates/solochain/pallets/template", + "templates/solochain/runtime", + + "templates/parachain/node", + "templates/parachain/pallets/template", + "templates/parachain/runtime", ] default-members = ["polkadot", "substrate/bin/node/cli"] @@ -534,9 +544,18 @@ extra-unused-type-parameters = { level = "allow", priority = 2 } # stylistic default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic [workspace.dependencies] -polkavm-linker = "0.8.2" -polkavm-derive = "0.8.0" +polkavm = "0.9.3" +polkavm-linker = "0.9.2" +polkavm-derive = "0.9.1" log = { version = "0.4.20", default-features = false } +quote = { version = "1.0.33" } +serde = { version = "1.0.197", default-features = false } +serde-big-array = { version = "0.3.2" } +serde_derive = { version = "1.0.117" } +serde_json = { version = "1.0.114", default-features = false } +serde_yaml = { version = "0.9" } +syn = { version = "2.0.50" } +thiserror = { version = "1.0.48" } [profile.release] # Polkadot runtime requires unwinding. 
diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index 828b567bb947d851e5d0f8dac03dadebf120e614..205b593365ef8216a2e501e5751303185d4f7537 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 54aae4a2f76a425046860835ccd2a66cc29cf135..8aa6b4b05e5efb2427a8548e91ec5f47ab494968 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index e667c283a07937ab88053c064fb7b8cfb380f765..c0dae684b5f2f3b7b9be096a808fc67d15dadfcf 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Bridge Dependencies diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 2ea31c26d6b7d9c6c9a2e5e7f14beab9cc850a21..22206fb2c376ce53fee9dc8ff806baaef3ce7c28 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -16,7 +16,7 @@ impl-trait-for-tuples = "0.2.2" log = { workspace = true } num-traits = { version = "0.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } # Substrate Dependencies diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs index f23ddd1a10d3681900b024999aef279ea6fcb91d..1d80890779bf8310b393d585749e96f9577196a1 100644 --- a/bridges/primitives/test-utils/src/lib.rs +++ b/bridges/primitives/test-utils/src/lib.rs @@ -129,7 +129,7 @@ pub fn make_justification_for_header( votes_ancestries.push(child.clone()); } - // The header we need to use when pre-commiting is the one at the highest height + // The header we need to use when pre-committing is the one at the highest height // on our chain. 
let precommit_candidate = chain.last().map(|h| (h.hash(), *h.number())).unwrap(); unsigned_precommits.push(precommit_candidate); diff --git a/bridges/snowbridge/README.md b/bridges/snowbridge/README.md index 49b9c2eaf553780176897a770bad9579d53bfaa9..6561df401120e9c5c5d6ee2762eb1423b5d6daaf 100644 --- a/bridges/snowbridge/README.md +++ b/bridges/snowbridge/README.md @@ -1,32 +1,40 @@ -# Snowbridge -[![codecov](https://codecov.io/gh/Snowfork/snowbridge/branch/main/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/snowbridge) +# Snowbridge · +[![codecov](https://codecov.io/gh/Snowfork/polkadot-sdk/branch/snowbridge/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/polkadot-sdk) ![GitHub](https://img.shields.io/github/license/Snowfork/snowbridge) Snowbridge is a trustless bridge between Polkadot and Ethereum. For documentation, visit https://docs.snowbridge.network. ## Components +The Snowbridge project lives in two repositories: + +- [Snowfork/polkadot-sdk](https://github.com/Snowfork/polkadot-sdk): The Snowbridge parachain and pallets live in +a fork of the Polkadot SDK. Changes are eventually contributed back to +[paritytech/polkadot-sdk](https://github.com/paritytech/polkadot-sdk). +- [Snowfork/snowbridge](https://github.com/Snowfork/snowbridge): The rest of the Snowbridge components, like the contracts, +off-chain relayer, end-to-end tests, and testnet setup code. + ### Parachain -Polkadot parachain and our pallets. See [parachain/README.md](https://github.com/Snowfork/snowbridge/blob/main/parachain/README.md). +Polkadot parachain and our pallets. See [README.md](https://github.com/Snowfork/polkadot-sdk/blob/snowbridge/bridges/snowbridge/README.md). ### Contracts -Ethereum contracts and unit tests. See [contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) +Ethereum contracts and unit tests. See [Snowfork/snowbridge/contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) ### Relayer Off-chain relayer services for relaying messages between Polkadot and Ethereum. See -[relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) +[Snowfork/snowbridge/relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) ### Local Testnet Scripts to provision a local testnet, running the above services to bridge between local deployments of Polkadot and -Ethereum. See [web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). +Ethereum. See [Snowfork/snowbridge/web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). ### Smoke Tests -Integration tests for our local testnet. See [smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). +Integration tests for our local testnet. See [Snowfork/snowbridge/smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). ## Development @@ -83,7 +91,7 @@ direnv allow ### Upgrading the Rust toolchain -Sometimes we would like to upgrade rust toolchain. First update `parachain/rust-toolchain.toml` as required and then +Sometimes we would like to upgrade the Rust toolchain.
First update `rust-toolchain.toml` as required and then update `flake.lock` running ```sh nix flake lock --update-input rust-overlay diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 541ded26737b55677d7e005783ab43d7fd5193c4..c8999633c97abb00174e38e16ed5618e7baf0b59 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-ethereum-client" description = "Snowbridge Ethereum Client Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true @@ -15,8 +15,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", optional = true } -serde_json = { version = "1.0.113", optional = true } +serde = { optional = true, workspace = true, default-features = true } +serde_json = { optional = true, workspace = true, default-features = true } codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } ssz_rs = { version = "0.9.0", default-features = false } @@ -45,12 +45,12 @@ pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-fea [dev-dependencies] rand = "0.8.5" sp-keyring = { path = "../../../../substrate/primitives/keyring" } -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } hex-literal = "0.4.1" pallet-timestamp = { path = "../../../../substrate/frame/timestamp" } snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures" } sp-io = { path = "../../../../substrate/primitives/io" } -serde = "1.0.196" +serde = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index 09653fa3b480183366113f5d90e458b03c845535..b850496cd4e14cd906565d488450b339a29f463f 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-inbound-queue" description = "Snowbridge Inbound Queue Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml index 61f1421e056773c4f078390f9c48f7b8fa0420d3..64605a42f0d383d838429eb9b82b5f6cf238ab09 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-inbound-queue-fixtures" description = "Snowbridge Inbound Queue Test Fixtures" -version = "0.9.0" +version = "0.10.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git 
a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 110f611c6766020039bd1f73def900914da8cae2..749fb0367f332d743b01ad9d56238106ced36e72 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -3,7 +3,7 @@ use super::*; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU128, ConstU32, Everything}, weights::IdentityFee, }; @@ -47,10 +47,9 @@ parameter_types! { type Balance = u128; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -60,16 +59,8 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; - type DbWeight = (); - type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 0956ac76678dca9fd57a7cc47c29cd62afc04ab7..f16a28cb1e457d9ebfb7804fa013e5b57858f79e 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-outbound-queue" description = "Snowbridge Outbound Queue Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false } +serde = { features = ["alloc", "derive"], workspace = true } codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index c185d5af7062045f40946fcbd3c45cb62b932216..0606e9de33056c9dffae50befcc1da5e865dca44 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-outbound-queue-merkle-tree" description = "Snowbridge Outbound Queue Merkle Tree" -version = "0.1.1" +version = "0.3.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index 347b3bae493b7491790854be7a28f82386d2ee4b..cb68fd0a250a92e7f6a6693f3aebf1c8553308aa 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-outbound-queue-runtime-api" description = "Snowbridge 
Outbound Queue Runtime API" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index dd8fee4e2ed08ec0f3090b765fa882b063a98300..6e78fb4467210e3cb5e1eb581b377cbbfeac74ad 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -3,7 +3,7 @@ use super::*; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{Everything, Hooks}, weights::IdentityFee, }; @@ -37,10 +37,9 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -50,16 +49,7 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; - type DbWeight = (); - type Version = (); type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index f6c642e7376f7f068af1fe3c2cf9b11a2c50f2cc..5ad04290de044a2c8ed13aa092f5ea033aaafb97 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-system" description = "Snowbridge System Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 355d2d29147f3cd84ae013363db874c9b9739b8e..eb02ae1db529730f51743e79a322e54db44fee51 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-system-runtime-api" description = "Snowbridge System Runtime API" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index b7f38fb753d31bd67acb78174e175f90fc711175..6e5ceb5e9b1d42796567c3da5e549b2af3cfd4de 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -37,8 +37,6 @@ //! `force_update_channel` and extrinsics to manage agents and channels for system parachains. 
#![cfg_attr(not(feature = "std"), no_std)] -pub use pallet::*; - #[cfg(test)] mod mock; @@ -79,6 +77,8 @@ use xcm_executor::traits::ConvertLocation; #[cfg(feature = "runtime-benchmarks")] use frame_support::traits::OriginTrait; +pub use pallet::*; + pub type BalanceOf = <::Token as Inspect<::AccountId>>::Balance; pub type AccountIdOf = ::AccountId; diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index edc3f141b0735d7439b120c51da836fb8a77bd04..de2970dd550ba75fe42de08dc4d297cd5cccdf1f 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -2,8 +2,8 @@ // SPDX-FileCopyrightText: 2023 Snowfork use crate as snowbridge_system; use frame_support::{ - parameter_types, - traits::{tokens::fungible::Mutate, ConstU128, ConstU16, ConstU64, ConstU8}, + derive_impl, parameter_types, + traits::{tokens::fungible::Mutate, ConstU128, ConstU64, ConstU8}, weights::IdentityFee, PalletId, }; @@ -95,11 +95,9 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -109,15 +107,8 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; - type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..77b5c1aa631db89a986837f258ee7dea45a580d0 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate as ethereum_beacon_client; +use frame_support::parameter_types; +use pallet_timestamp; +use primitives::{Fork, ForkVersions}; +use sp_core::H256; +use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; + +#[cfg(not(feature = "beacon-spec-mainnet"))] +pub mod minimal { + use super::*; + + use crate::config; + use frame_support::derive_impl; + use hex_literal::hex; + use primitives::CompactExecutionHeader; + use snowbridge_core::inbound::{Log, Proof}; + use sp_runtime::BuildStorage; + use std::{fs::File, path::PathBuf}; + + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type PalletInfo = PalletInfo; + type SS58Prefix = SS58Prefix; + type Nonce = u64; + type Block = Block; + } + + impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); + } + + parameter_types! { + pub const ExecutionHeadersPruneThreshold: u32 = 10; + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 0, 1], // 0x00000001 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 0, 1], // 0x01000001 + epoch: 0, + }, + bellatrix: Fork { + version: [2, 0, 0, 1], // 0x02000001 + epoch: 0, + }, + capella: Fork { + version: [3, 0, 0, 1], // 0x03000001 + epoch: 0, + }, + }; + } + + impl ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); + } + + // Build genesis storage according to the mock runtime. + pub fn new_tester() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + ext + } + + fn load_fixture(basename: &str) -> Result + where + T: for<'de> serde::Deserialize<'de>, + { + let filepath: PathBuf = + [env!("CARGO_MANIFEST_DIR"), "tests", "fixtures", basename].iter().collect(); + serde_json::from_reader(File::open(filepath).unwrap()) + } + + pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { + load_fixture("execution-header-update.minimal.json").unwrap() + } + + pub fn load_checkpoint_update_fixture( + ) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { + load_fixture("initial-checkpoint.minimal.json").unwrap() + } + + pub fn load_sync_committee_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("sync-committee-update.minimal.json").unwrap() + } + + pub fn load_finalized_header_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("finalized-header-update.minimal.json").unwrap() + } + + pub fn load_next_sync_committee_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("next-sync-committee-update.minimal.json").unwrap() + } + + pub fn load_next_finalized_header_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("next-finalized-header-update.minimal.json").unwrap() + } + + pub fn get_message_verification_payload() -> (Log, Proof) { + ( + Log { + address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), + topics: vec![ + hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), + 
hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), + hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), + ], + data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), + }, + Proof { + block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), + tx_index: 0, + data: (vec![ + hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), + hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), + hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), + ], vec![ + hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), + hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), + hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), + ]), + } + ) + } + + pub fn get_message_verification_header() -> CompactExecutionHeader { + CompactExecutionHeader { + parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") + .into(), + 
block_number: 55, + state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3") + .into(), + receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb") + .into(), + } + } +} + +#[cfg(feature = "beacon-spec-mainnet")] +pub mod mainnet { + use super::*; + use frame_support::derive_impl; + + type Block = frame_system::mocking::MockBlock; + use sp_runtime::BuildStorage; + + frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type PalletInfo = PalletInfo; + type SS58Prefix = SS58Prefix; + type Nonce = u64; + type Block = Block; + } + + impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); + } + + parameter_types! { + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 16, 32], // 0x00001020 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 16, 32], // 0x01001020 + epoch: 36660, + }, + bellatrix: Fork { + version: [2, 0, 16, 32], // 0x02001020 + epoch: 112260, + }, + capella: Fork { + version: [3, 0, 16, 32], // 0x03001020 + epoch: 162304, + }, + }; + pub const ExecutionHeadersPruneThreshold: u32 = 10; + } + + impl ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); + } + + // Build genesis storage according to the mock runtime. 
+ pub fn new_tester() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + ext + } +} diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index 4f5213b3422bc9fba49f62ce63ae304d45333152..d181fa1d3945a704a3d1e1e28fea67b7dea0ee15 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-beacon-primitives" description = "Snowbridge Beacon Primitives" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true @@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } hex = { version = "0.4", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 460c6c3442ec9c909013aab2b60c0f6c92c179a2..9a299ad0ae92326a6d0bb0391baf81e6e5bad663 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-core" description = "Snowbridge Core" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true @@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { version = "1.0.196", optional = true, features = ["alloc", "derive"], default-features = false } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index a9e2fe654212d6b9d1fd8a26a1838708c3328bfa..9fa725a6c0565a5f42847d89149878f8997d07a0 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-ethereum" description = "Snowbridge Ethereum" -version = "0.1.0" +version = "0.3.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true @@ -12,8 +12,8 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { version = "1.0.196", optional = true, features = ["derive"] } -serde-big-array = { version = "0.3.2", optional = true, features = ["const-generics"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +serde-big-array = { optional = true, features = ["const-generics"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } ethbloom = { version = "0.13.0", default-features = false } @@ -33,7 +33,7 @@ ethabi = 
{ package = "ethabi-decode", version = "1.0.0", default-features = fals [dev-dependencies] wasm-bindgen-test = "0.3.19" rand = "0.8.5" -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index c4fc688a946c0ada710f3ec665f5ea239aee0377..ded773e0d38917b7834679b3e521dfbe9539e51b 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-router-primitives" description = "Snowbridge Router Primitives" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true @@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } log = { workspace = true } diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index d4c86f8aa750134df8dfbe22a277a872e77f9175..bf5e9a8832dcf48113d5f74a92a060687da2fe4e 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-runtime-common" description = "Snowbridge Runtime Common" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 868f72b7b86efd8cb91513f667eefb4315684ad1..4e8b311cb97812bb94140aa02405b3a174064a8f 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-runtime-test-common" description = "Snowbridge Runtime Tests" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition = "2021" license = "Apache-2.0" @@ -15,7 +15,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = "1.11.0" # Substrate diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index c9bbce98e575d5e55015aa7814d8cd57a5c3a966..7455adf76170acefd50f06e8a40ef1c79028f49f 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -13,9 +13,9 @@ use parachains_runtimes_test_utils::{ }; use snowbridge_core::{ChannelId, ParaId}; use snowbridge_pallet_ethereum_client_fixtures::*; -use sp_core::H160; +use sp_core::{H160, U256}; use sp_keyring::AccountKeyring::*; -use sp_runtime::{traits::Header, AccountId32, SaturatedConversion, Saturating}; +use sp_runtime::{traits::Header, AccountId32, DigestItem, SaturatedConversion, Saturating}; use xcm::{ latest::prelude::*, 
v3::Error::{self, Barrier}, @@ -40,6 +40,7 @@ where } pub fn send_transfer_token_message( + ethereum_chain_id: u64, assethub_parachain_id: u32, weth_contract_address: H160, destination_address: H160, @@ -53,7 +54,8 @@ where + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + snowbridge_pallet_outbound_queue::Config, + + snowbridge_pallet_outbound_queue::Config + + pallet_timestamp::Config, XcmConfig: xcm_executor::Config, { let assethub_parachain_location = Location::new(1, Parachain(assethub_parachain_id)); @@ -88,7 +90,7 @@ where WithdrawAsset(Assets::from(vec![fee.clone()])), BuyExecution { fees: fee, weight_limit: Unlimited }, ExportMessage { - network: Ethereum { chain_id: 11155111 }, + network: Ethereum { chain_id: ethereum_chain_id }, destination: Here, xcm: inner_xcm, }, @@ -106,6 +108,7 @@ where } pub fn send_transfer_token_message_success( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -125,7 +128,8 @@ pub fn send_transfer_token_message_success( + pallet_message_queue::Config + cumulus_pallet_parachain_system::Config + snowbridge_pallet_outbound_queue::Config - + snowbridge_pallet_system::Config, + + snowbridge_pallet_system::Config + + pallet_timestamp::Config, XcmConfig: xcm_executor::Config, ValidatorIdOf: From>, ::AccountId: From + AsRef<[u8]>, @@ -147,6 +151,7 @@ pub fn send_transfer_token_message_success( initial_fund::(assethub_parachain_id, 5_000_000_000_000); let outcome = send_transfer_token_message::( + ethereum_chain_id, assethub_parachain_id, weth_contract_address, destination_address, @@ -193,13 +198,104 @@ pub fn send_transfer_token_message_success( let digest = included_head.digest(); - //let digest = frame_system::Pallet::::digest(); let digest_items = digest.logs(); assert!(digest_items.len() == 1 && digest_items[0].as_other().is_some()); }); } +pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works< + Runtime, + XcmConfig, + AllPalletsWithoutSystem, +>( + ethereum_chain_id: u64, + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + assethub_parachain_id: u32, + weth_contract_address: H160, + destination_address: H160, + fee_amount: u128, + snowbridge_pallet_outbound_queue: Box< + dyn Fn(Vec) -> Option>, + >, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + pallet_message_queue::Config + + cumulus_pallet_parachain_system::Config + + snowbridge_pallet_outbound_queue::Config + + snowbridge_pallet_system::Config + + pallet_timestamp::Config, + XcmConfig: xcm_executor::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + ValidatorIdOf: From>, + ::AccountId: From + AsRef<[u8]>, +{ + ExtBuilder::::default() + .with_collators(collator_session_key.collators()) + .with_session_keys(collator_session_key.session_keys()) + .with_para_id(runtime_para_id.into()) + .with_tracing() + .build() + .execute_with(|| { + >::initialize( + runtime_para_id.into(), + assethub_parachain_id.into(), + ) + .unwrap(); + + // fund asset hub sovereign account enough so it can pay fees + initial_fund::(assethub_parachain_id, 5_000_000_000_000); + + let outcome = send_transfer_token_message::( + ethereum_chain_id, + assethub_parachain_id, + weth_contract_address, + destination_address, + fee_amount, + ); + + assert_ok!(outcome.ensure_complete()); + + // check events + let mut 
events = >::events() + .into_iter() + .filter_map(|e| snowbridge_pallet_outbound_queue(e.event.encode())); + assert!(events.any(|e| matches!( + e, + snowbridge_pallet_outbound_queue::Event::MessageQueued { .. } + ))); + + let next_block_number: U256 = >::block_number() + .saturating_add(BlockNumberFor::::from(1u32)) + .into(); + + let included_head = + RuntimeHelper::::run_to_block_with_finalize( + next_block_number.as_u32(), + ); + let digest = included_head.digest(); + let digest_items = digest.logs(); + + let mut found_outbound_digest = false; + for digest_item in digest_items { + match digest_item { + DigestItem::Other(_) => found_outbound_digest = true, + _ => {}, + } + } + + assert_eq!(found_outbound_digest, true); + }); +} + pub fn send_unpaid_transfer_token_message( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -213,7 +309,8 @@ pub fn send_unpaid_transfer_token_message( + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + snowbridge_pallet_outbound_queue::Config, + + snowbridge_pallet_outbound_queue::Config + + pallet_timestamp::Config, XcmConfig: xcm_executor::Config, ValidatorIdOf: From>, { @@ -262,7 +359,7 @@ pub fn send_unpaid_transfer_token_message( let xcm = Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, ExportMessage { - network: Ethereum { chain_id: 11155111 }, + network: Ethereum { chain_id: ethereum_chain_id }, destination: Here, xcm: inner_xcm, }, @@ -284,6 +381,7 @@ pub fn send_unpaid_transfer_token_message( #[allow(clippy::too_many_arguments)] pub fn send_transfer_token_message_failure( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -301,7 +399,8 @@ pub fn send_transfer_token_message_failure( + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + snowbridge_pallet_outbound_queue::Config - + snowbridge_pallet_system::Config, + + snowbridge_pallet_system::Config + + pallet_timestamp::Config, XcmConfig: xcm_executor::Config, ValidatorIdOf: From>, { @@ -322,6 +421,7 @@ pub fn send_transfer_token_message_failure( initial_fund::(assethub_parachain_id, initial_amount); let outcome = send_transfer_token_message::( + ethereum_chain_id, assethub_parachain_id, weth_contract_address, destination_address, @@ -349,7 +449,8 @@ pub fn ethereum_extrinsic( + cumulus_pallet_parachain_system::Config + snowbridge_pallet_outbound_queue::Config + snowbridge_pallet_system::Config - + snowbridge_pallet_ethereum_client::Config, + + snowbridge_pallet_ethereum_client::Config + + pallet_timestamp::Config, ValidatorIdOf: From>, ::RuntimeCall: From>, @@ -430,7 +531,8 @@ pub fn ethereum_to_polkadot_message_extrinsics_work( + cumulus_pallet_parachain_system::Config + snowbridge_pallet_outbound_queue::Config + snowbridge_pallet_system::Config - + snowbridge_pallet_ethereum_client::Config, + + snowbridge_pallet_ethereum_client::Config + + pallet_timestamp::Config, ValidatorIdOf: From>, ::RuntimeCall: From>, diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh new file mode 100755 index 0000000000000000000000000000000000000000..32005b770ecf44cb9af18c61f830243ed5287e68 --- /dev/null +++ b/bridges/snowbridge/scripts/contribute-upstream.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# A script to cleanup the Snowfork fork of the polkadot-sdk to contribute it upstream back to parity/polkadot-sdk +# 
./bridges/snowbridge/scripts/contribute-upstream.sh + +# show CLI help +function show_help() { + set +x + echo " " + echo Error: $1 + echo "Usage:" + echo " ./bridges/snowbridge/scripts/contribute-upstream.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo" + exit 1 +} + +if [[ -z "$1" ]]; then + echo "Please provide the branch name to use for the upstream branch" + exit 1 +fi + +branch_name=$1 + +set -eux + +# let's avoid any restrictions on where this script can be called from - snowbridge repo may be +# plugged into any other repo folder. So the script (and other stuff that needs to be removed) +# may be located either in call dir, or one of its subdirs. +SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../" + +# Get the current Git branch name +current_branch=$(git rev-parse --abbrev-ref HEAD) + +if [ "$current_branch" = "$branch_name" ] || git branch | grep -q "$branch_name"; then + echo "Already on requested branch or branch exists, not creating." +else + git branch "$branch_name" +fi + +git checkout "$branch_name" + +# remove everything we think is not required for our needs +rm -rf rust-toolchain.toml +rm -rf codecov.yml +rm -rf $SNOWBRIDGE_FOLDER/.cargo +rm -rf $SNOWBRIDGE_FOLDER/.github +rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md +rm -rf $SNOWBRIDGE_FOLDER/.gitignore +rm -rf $SNOWBRIDGE_FOLDER/rustfmt.toml +rm -rf $SNOWBRIDGE_FOLDER/templates +rm -rf $SNOWBRIDGE_FOLDER/pallets/ethereum-client/fuzz + +pushd $SNOWBRIDGE_FOLDER + +# let's test if everything we need compiles +cargo check -p snowbridge-pallet-ethereum-client +cargo check -p snowbridge-pallet-ethereum-client --features runtime-benchmarks +cargo check -p snowbridge-pallet-ethereum-client --features try-runtime +cargo check -p snowbridge-pallet-inbound-queue +cargo check -p snowbridge-pallet-inbound-queue --features runtime-benchmarks +cargo check -p snowbridge-pallet-inbound-queue --features try-runtime +cargo check -p snowbridge-pallet-outbound-queue +cargo check -p snowbridge-pallet-outbound-queue --features runtime-benchmarks +cargo check -p snowbridge-pallet-outbound-queue --features try-runtime +cargo check -p snowbridge-pallet-system +cargo check -p snowbridge-pallet-system --features runtime-benchmarks +cargo check -p snowbridge-pallet-system --features try-runtime + +# we're removing lock file after all checks are done.
Otherwise we may use different +# Substrate/Polkadot/Cumulus commits and our checks will fail +rm -f $SNOWBRIDGE_FOLDER/Cargo.toml +rm -f $SNOWBRIDGE_FOLDER/Cargo.lock + +popd + +# Restore Parity's CI files, which we overwrote in our fork to run our own CI +rm -rf .github +git remote -v | grep -w parity || git remote add parity https://github.com/paritytech/polkadot-sdk +git fetch parity master +git checkout parity/master -- .github +git add -- .github + +echo "OK" diff --git a/bridges/snowbridge/scripts/verify-pallets-build.sh b/bridges/snowbridge/scripts/verify-pallets-build.sh deleted file mode 100755 index a62f48c84d4fd34731c20365a20097e086aa2c99..0000000000000000000000000000000000000000 --- a/bridges/snowbridge/scripts/verify-pallets-build.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/bash - -# A script to remove everything from snowbridge repository/subtree, except: -# -# - parachain -# - readme -# - license - -set -eu - -# show CLI help -function show_help() { - set +x - echo " " - echo Error: $1 - echo "Usage:" - echo " ./scripts/verify-pallets-build.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo" - echo "Options:" - echo " --no-revert Leaves only runtime code on exit" - echo " --ignore-git-state Ignores git actual state" - exit 1 -} - -# parse CLI args -NO_REVERT= -IGNORE_GIT_STATE= -for i in "$@" -do - case $i in - --no-revert) - NO_REVERT=true - shift - ;; - --ignore-git-state) - IGNORE_GIT_STATE=true - shift - ;; - *) - show_help "Unknown option: $i" - ;; - esac -done - -# the script is able to work only on clean git copy, unless we want to ignore this check -[[ ! -z "${IGNORE_GIT_STATE}" ]] || [[ -z "$(git status --porcelain)" ]] || { echo >&2 "The git copy must be clean"; exit 1; } - -# let's avoid any restrictions on where this script can be called for - snowbridge repo may be -# plugged into any other repo folder. So the script (and other stuff that needs to be removed) -# may be located either in call dir, or one of it subdirs. -SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../.." - -# remove everything we think is not required for our needs -rm -rf $SNOWBRIDGE_FOLDER/.cargo -rm -rf $SNOWBRIDGE_FOLDER/.github -rm -rf $SNOWBRIDGE_FOLDER/contracts -rm -rf $SNOWBRIDGE_FOLDER/codecov.yml -rm -rf $SNOWBRIDGE_FOLDER/docs -rm -rf $SNOWBRIDGE_FOLDER/hooks -rm -rf $SNOWBRIDGE_FOLDER/relayer -rm -rf $SNOWBRIDGE_FOLDER/scripts -rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md -rm -rf $SNOWBRIDGE_FOLDER/smoketest -rm -rf $SNOWBRIDGE_FOLDER/web -rm -rf $SNOWBRIDGE_FOLDER/.envrc-example -rm -rf $SNOWBRIDGE_FOLDER/.gitbook.yaml -rm -rf $SNOWBRIDGE_FOLDER/.gitignore -rm -rf $SNOWBRIDGE_FOLDER/.gitmodules -rm -rf $SNOWBRIDGE_FOLDER/_typos.toml -rm -rf $SNOWBRIDGE_FOLDER/_codecov.yml -rm -rf $SNOWBRIDGE_FOLDER/flake.lock -rm -rf $SNOWBRIDGE_FOLDER/flake.nix -rm -rf $SNOWBRIDGE_FOLDER/go.work -rm -rf $SNOWBRIDGE_FOLDER/go.work.sum -rm -rf $SNOWBRIDGE_FOLDER/polkadot-sdk -rm -rf $SNOWBRIDGE_FOLDER/rust-toolchain.toml -rm -rf $SNOWBRIDGE_FOLDER/parachain/rustfmt.toml -rm -rf $SNOWBRIDGE_FOLDER/parachain/.gitignore -rm -rf $SNOWBRIDGE_FOLDER/parachain/templates -rm -rf $SNOWBRIDGE_FOLDER/parachain/.cargo -rm -rf $SNOWBRIDGE_FOLDER/parachain/.config -rm -rf $SNOWBRIDGE_FOLDER/parachain/pallets/ethereum-client/fuzz - -cd bridges/snowbridge/parachain - -# fix polkadot-sdk paths in Cargo.toml files -find "."
-name 'Cargo.toml' | while read -r file; do - replace=$(printf '../../' ) - if [[ "$(uname)" = "Darwin" ]] || [[ "$(uname)" = *BSD ]]; then - sed -i '' "s|polkadot-sdk/|$replace|g" "$file" - else - sed -i "s|polkadot-sdk/|$replace|g" "$file" - fi -done - -# let's test if everything we need compiles -cargo check -p snowbridge-pallet-ethereum-client -cargo check -p snowbridge-pallet-ethereum-client --features runtime-benchmarks -cargo check -p snowbridge-pallet-ethereum-client --features try-runtime -cargo check -p snowbridge-pallet-inbound-queue -cargo check -p snowbridge-pallet-inbound-queue --features runtime-benchmarks -cargo check -p snowbridge-pallet-inbound-queue --features try-runtime -cargo check -p snowbridge-pallet-outbound-queue -cargo check -p snowbridge-pallet-outbound-queue --features runtime-benchmarks -cargo check -p snowbridge-pallet-outbound-queue --features try-runtime -cargo check -p snowbridge-pallet-system -cargo check -p snowbridge-pallet-system --features runtime-benchmarks -cargo check -p snowbridge-pallet-system --features try-runtime - -cd - - -# we're removing lock file after all checks are done. Otherwise we may use different -# Substrate/Polkadot/Cumulus commits and our checks will fail -rm -f $SNOWBRIDGE_FOLDER/parachain/Cargo.toml -rm -f $SNOWBRIDGE_FOLDER/parachain/Cargo.lock - -echo "OK" diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index 84764cdaca38c6bd19eb964c6e8d4fd54c4379c4..479ab833abfc38dd978c7b7d3abdd4c1fe37ad64 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -1,7 +1,7 @@ #!/bin/bash # import common functions -source "${BASH_SOURCE%/*}/../../utils/bridges.sh" +source "$FRAMEWORK_PATH/utils/bridges.sh" # Expected sovereign accounts. 
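# NOTE: the utils above are loaded through $FRAMEWORK_PATH, which the test runner
# (bridges/testing/run-new-test.sh) exports before any environment script runs,
# roughly:
#
#     export FRAMEWORK_PATH=`realpath ${BASH_SOURCE%/*}/framework`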
# @@ -319,6 +319,7 @@ case "$1" in $XCM_VERSION ;; reserve-transfer-assets-from-asset-hub-rococo-local) + amount=$2 ensure_polkadot_js_api # send ROCs to Alice account on AHW limited_reserve_transfer_assets \ @@ -326,11 +327,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; withdraw-reserve-assets-from-asset-hub-rococo-local) + amount=$2 ensure_polkadot_js_api # send back only 100000000000 wrappedWNDs to Alice account on AHW limited_reserve_transfer_assets \ @@ -338,11 +340,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; reserve-transfer-assets-from-asset-hub-westend-local) + amount=$2 ensure_polkadot_js_api # send WNDs to Alice account on AHR limited_reserve_transfer_assets \ @@ -350,11 +353,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; withdraw-reserve-assets-from-asset-hub-westend-local) + amount=$2 ensure_polkadot_js_api # send back only 100000000000 wrappedROCs to Alice account on AHR limited_reserve_transfer_assets \ @@ -362,7 +366,7 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { 
"GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; diff --git a/bridges/testing/environments/rococo-westend/helper.sh b/bridges/testing/environments/rococo-westend/helper.sh index 211a5b53b3d99ebef0e8c6a34cda48321badca9d..0a13ded213f5d3a0920cb466fc974c129e9ad79a 100755 --- a/bridges/testing/environments/rococo-westend/helper.sh +++ b/bridges/testing/environments/rococo-westend/helper.sh @@ -1,3 +1,3 @@ #!/bin/bash -$POLKADOT_SDK_PATH/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh "$@" +$ENV_PATH/bridges_rococo_westend.sh "$@" diff --git a/bridges/testing/environments/rococo-westend/rococo-init.zndsl b/bridges/testing/environments/rococo-westend/rococo-init.zndsl index 145f2df73a6ec4b4179b12f0edbad5a6b95a7cba..c913e4db31f49184eb8214fda4d525c3594b358b 100644 --- a/bridges/testing/environments/rococo-westend/rococo-init.zndsl +++ b/bridges/testing/environments/rococo-westend/rococo-init.zndsl @@ -1,8 +1,8 @@ -Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back +Description: Check if the HRMP channel between Rococo BH and Rococo AH was opened successfully Network: ./bridge_hub_rococo_local_network.toml Creds: config # ensure that initialization has completed -asset-hub-rococo-collator1: js-script ../../js-helpers/wait-hrmp-channel-opened.js with "1013" within 300 seconds +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wait-hrmp-channel-opened.js with "1013" within 300 seconds diff --git a/bridges/testing/environments/rococo-westend/rococo.zndsl b/bridges/testing/environments/rococo-westend/rococo.zndsl index bd8681af2196814405a7d96e55882f00cbc0dc02..5b49c7c632fa4dd0ce77134858a2f697acbfff16 100644 --- a/bridges/testing/environments/rococo-westend/rococo.zndsl +++ b/bridges/testing/environments/rococo-westend/rococo.zndsl @@ -1,7 +1,7 @@ -Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back +Description: Check if the with-Westend GRANPDA pallet was initialized at Rococo BH Network: ./bridge_hub_rococo_local_network.toml Creds: config # relay is already started - let's wait until with-Westend GRANPDA pallet is initialized at Rococo -bridge-hub-rococo-collator1: js-script ../../js-helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds diff --git a/bridges/testing/environments/rococo-westend/spawn.sh b/bridges/testing/environments/rococo-westend/spawn.sh index 5a0d65ce65dba1e5649920a419b7e9fa3da00367..cbd0b1bc623ab77876ed5ce3beefd7ab72db2d37 100755 --- a/bridges/testing/environments/rococo-westend/spawn.sh +++ b/bridges/testing/environments/rococo-westend/spawn.sh @@ -4,7 +4,7 @@ set -e trap "trap - SIGTERM && kill -9 -$$" SIGINT SIGTERM EXIT -source "${BASH_SOURCE%/*}/../../utils/zombienet.sh" +source "$FRAMEWORK_PATH/utils/zombienet.sh" # whether to init the chains (open HRMP channels, set XCM version, create reserve assets, etc) init=0 diff --git a/bridges/testing/environments/rococo-westend/start_relayer.sh b/bridges/testing/environments/rococo-westend/start_relayer.sh index c57d4f1a437493afd627efb989cba50222a3653c..7ddd312d395aa8733d2afea59277b48721c8a36b 100755 --- 
a/bridges/testing/environments/rococo-westend/start_relayer.sh +++ b/bridges/testing/environments/rococo-westend/start_relayer.sh @@ -2,8 +2,8 @@ set -e -source "${BASH_SOURCE%/*}/../../utils/common.sh" -source "${BASH_SOURCE%/*}/../../utils/zombienet.sh" +source "$FRAMEWORK_PATH/utils/common.sh" +source "$FRAMEWORK_PATH/utils/zombienet.sh" rococo_dir=$1 westend_dir=$2 diff --git a/bridges/testing/environments/rococo-westend/westend-init.zndsl b/bridges/testing/environments/rococo-westend/westend-init.zndsl index 2f8e665d592d587f3e88b45b8e5dcfa1e05780cc..0f5428eed3b01c042f8aad3b3df51c3a800a9b72 100644 --- a/bridges/testing/environments/rococo-westend/westend-init.zndsl +++ b/bridges/testing/environments/rococo-westend/westend-init.zndsl @@ -1,7 +1,7 @@ -Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back +Description: Check if the HRMP channel between Westend BH and Westend AH was opened successfully Network: ./bridge_hub_westend_local_network.toml Creds: config # ensure that initialization has completed -asset-hub-westend-collator1: js-script ../../js-helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds diff --git a/bridges/testing/environments/rococo-westend/westend.zndsl b/bridges/testing/environments/rococo-westend/westend.zndsl index c75ae579d27ab6eb979d6a0b88ab481940eb0e76..07968838852f7c0a00131db3080c460c07d08206 100644 --- a/bridges/testing/environments/rococo-westend/westend.zndsl +++ b/bridges/testing/environments/rococo-westend/westend.zndsl @@ -1,6 +1,6 @@ -Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back +Description: Check if the with-Rococo GRANDPA pallet was initialized at Westend BH Network: ./bridge_hub_westend_local_network.toml Creds: config # relay is already started - let's wait until with-Rococo GRANDPA pallet is initialized at Westend -bridge-hub-westend-collator1: js-script ../../js-helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds diff --git a/bridges/testing/js-helpers/best-finalized-header-at-bridged-chain.js b/bridges/testing/framework/js-helpers/best-finalized-header-at-bridged-chain.js similarity index 100% rename from bridges/testing/js-helpers/best-finalized-header-at-bridged-chain.js rename to bridges/testing/framework/js-helpers/best-finalized-header-at-bridged-chain.js diff --git a/bridges/testing/js-helpers/chains/rococo-at-westend.js b/bridges/testing/framework/js-helpers/chains/rococo-at-westend.js similarity index 100% rename from bridges/testing/js-helpers/chains/rococo-at-westend.js rename to bridges/testing/framework/js-helpers/chains/rococo-at-westend.js diff --git a/bridges/testing/js-helpers/chains/westend-at-rococo.js b/bridges/testing/framework/js-helpers/chains/westend-at-rococo.js similarity index 100% rename from bridges/testing/js-helpers/chains/westend-at-rococo.js rename to bridges/testing/framework/js-helpers/chains/westend-at-rococo.js diff --git a/bridges/testing/js-helpers/native-assets-balance-increased.js b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js similarity index 82% rename from bridges/testing/js-helpers/native-assets-balance-increased.js rename to
bridges/testing/framework/js-helpers/native-assets-balance-increased.js index a35c753d97326d7b200c33844e9c5b8be22dfebc..749c3e2fec32ac0af4d244c53cb4ac1c6237817a 100644 --- a/bridges/testing/js-helpers/native-assets-balance-increased.js +++ b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js @@ -3,12 +3,13 @@ async function run(nodeName, networkInfo, args) { const api = await zombie.connect(wsUri, userDefinedTypes); const accountAddress = args[0]; + const expectedIncrease = BigInt(args[1]); const initialAccountData = await api.query.system.account(accountAddress); const initialAccountBalance = initialAccountData.data['free']; while (true) { const accountData = await api.query.system.account(accountAddress); const accountBalance = accountData.data['free']; - if (accountBalance > initialAccountBalance) { + if (accountBalance > initialAccountBalance + expectedIncrease) { return accountBalance; } @@ -17,4 +18,4 @@ async function run(nodeName, networkInfo, args) { } } -module.exports = { run } +module.exports = {run} diff --git a/bridges/testing/js-helpers/only-mandatory-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js similarity index 100% rename from bridges/testing/js-helpers/only-mandatory-headers-synced-when-idle.js rename to bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js diff --git a/bridges/testing/js-helpers/only-required-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js similarity index 100% rename from bridges/testing/js-helpers/only-required-headers-synced-when-idle.js rename to bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js diff --git a/bridges/testing/js-helpers/relayer-rewards.js b/bridges/testing/framework/js-helpers/relayer-rewards.js similarity index 100% rename from bridges/testing/js-helpers/relayer-rewards.js rename to bridges/testing/framework/js-helpers/relayer-rewards.js diff --git a/bridges/testing/js-helpers/utils.js b/bridges/testing/framework/js-helpers/utils.js similarity index 100% rename from bridges/testing/js-helpers/utils.js rename to bridges/testing/framework/js-helpers/utils.js diff --git a/bridges/testing/js-helpers/wait-hrmp-channel-opened.js b/bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js similarity index 100% rename from bridges/testing/js-helpers/wait-hrmp-channel-opened.js rename to bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js diff --git a/bridges/testing/js-helpers/wrapped-assets-balance.js b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js similarity index 100% rename from bridges/testing/js-helpers/wrapped-assets-balance.js rename to bridges/testing/framework/js-helpers/wrapped-assets-balance.js diff --git a/bridges/testing/utils/bridges.sh b/bridges/testing/framework/utils/bridges.sh similarity index 98% rename from bridges/testing/utils/bridges.sh rename to bridges/testing/framework/utils/bridges.sh index cfde5dfd26b720e5eba20b0b0b3685e665163e3f..7c8399461584a85e4e8eedf5f347d9d74725f1c9 100755 --- a/bridges/testing/utils/bridges.sh +++ b/bridges/testing/framework/utils/bridges.sh @@ -41,8 +41,8 @@ function ensure_polkadot_js_api() { echo "" echo "" echo "-------------------" - echo "Installing (nodejs) sub module: $(dirname "$0")/generate_hex_encoded_call" - pushd $(dirname "$0")/generate_hex_encoded_call + echo "Installing (nodejs) sub module: ${BASH_SOURCE%/*}/generate_hex_encoded_call" + 
pushd ${BASH_SOURCE%/*}/generate_hex_encoded_call npm install popd fi diff --git a/bridges/testing/utils/common.sh b/bridges/testing/framework/utils/common.sh similarity index 100% rename from bridges/testing/utils/common.sh rename to bridges/testing/framework/utils/common.sh diff --git a/bridges/testing/utils/generate_hex_encoded_call/index.js b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js similarity index 100% rename from bridges/testing/utils/generate_hex_encoded_call/index.js rename to bridges/testing/framework/utils/generate_hex_encoded_call/index.js diff --git a/bridges/testing/utils/generate_hex_encoded_call/package-lock.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json similarity index 100% rename from bridges/testing/utils/generate_hex_encoded_call/package-lock.json rename to bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json diff --git a/bridges/testing/utils/generate_hex_encoded_call/package.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package.json similarity index 100% rename from bridges/testing/utils/generate_hex_encoded_call/package.json rename to bridges/testing/framework/utils/generate_hex_encoded_call/package.json diff --git a/bridges/testing/utils/zombienet.sh b/bridges/testing/framework/utils/zombienet.sh similarity index 100% rename from bridges/testing/utils/zombienet.sh rename to bridges/testing/framework/utils/zombienet.sh diff --git a/bridges/testing/run-new-test.sh b/bridges/testing/run-new-test.sh index 2ed2a412b8a7467d84624a47805a4d31e4d4d1a1..7c84a69aa47de84439091cb7b908233d02238175 100755 --- a/bridges/testing/run-new-test.sh +++ b/bridges/testing/run-new-test.sh @@ -21,6 +21,7 @@ do done export POLKADOT_SDK_PATH=`realpath ${BASH_SOURCE%/*}/../..` +export FRAMEWORK_PATH=`realpath ${BASH_SOURCE%/*}/framework` # set path to binaries if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl index 203c95b73eb2d3743610b84828fd12402f2399d4..a58520ccea65b50dd0db1f67a72f6f8a4c5cdb38 100644 --- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl @@ -1,12 +1,12 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back -Network: ../../environments/rococo-westend/bridge_hub_westend_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send ROC to //Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run ../../environments/rococo-westend/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 120 seconds +# send 5 ROC to //Alice from Rococo AH to Westend AH +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds -# check that //Alice received the ROC on Westend AH -asset-hub-westend-collator1: js-script ../../js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 300 seconds +# check that //Alice received at least 4.8 ROC on Westend AH +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 300 seconds # check that the relayer //Charlie is rewarded by Westend AH 
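# (arguments to relayer-rewards.js, read left to right: relayer account, lane id
# 0x00000002, bridged-chain id 0x6268726F, which is ASCII for "bhro", the rewards
# account owner, and the minimum expected reward, here 0 so any accrued reward passes)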
-bridge-hub-westend-collator1: js-script ../../js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/run.sh b/bridges/testing/tests/0001-asset-transfer/run.sh index 8a053ee72097417eabb8f5a4f14aba90eb172a14..a7bb122919b40187c49e89c489d2271d646bff40 100755 --- a/bridges/testing/tests/0001-asset-transfer/run.sh +++ b/bridges/testing/tests/0001-asset-transfer/run.sh @@ -2,17 +2,19 @@ set -e -source "${BASH_SOURCE%/*}/../../utils/common.sh" -source "${BASH_SOURCE%/*}/../../utils/zombienet.sh" +source "${BASH_SOURCE%/*}/../../framework/utils/common.sh" +source "${BASH_SOURCE%/*}/../../framework/utils/zombienet.sh" -${BASH_SOURCE%/*}/../../environments/rococo-westend/spawn.sh --init --start-relayer & +export ENV_PATH=`realpath ${BASH_SOURCE%/*}/../../environments/rococo-westend` + +$ENV_PATH/spawn.sh --init --start-relayer & env_pid=$! -ensure_process_file $env_pid $TEST_DIR/rococo.env 400 +ensure_process_file $env_pid $TEST_DIR/rococo.env 600 rococo_dir=`cat $TEST_DIR/rococo.env` echo -ensure_process_file $env_pid $TEST_DIR/westend.env 180 +ensure_process_file $env_pid $TEST_DIR/westend.env 300 westend_dir=`cat $TEST_DIR/westend.env` echo diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl index bbd95db9cfda88396657d0744ef84a4dd4d92180..fedb78cc2103555a1d15c446dd2f08fca94643e1 100644 --- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl @@ -1,12 +1,12 @@ Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back -Network: ../../environments/rococo-westend/bridge_hub_rococo_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml Creds: config -# send WND to //Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run ../../environments/rococo-westend/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 120 seconds +# send 5 WND to //Alice from Westend AH to Rococo AH +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds -# check that //Alice received the WND on Rococo AH -asset-hub-rococo-collator1: js-script ../../js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 300 seconds +# check that //Alice received at least 4.8 WND on Rococo AH +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 300 seconds # check that the relayer //Charlie is rewarded by Rococo AH -bridge-hub-rococo-collator1: js-script ../../js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl 
b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl index 4c0a4675234e614f6569653aa14263dac3ac3e03..68b888b6858e86b8fe846b887bc101e221b2f21d 100644 --- a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl @@ -1,10 +1,10 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back -Network: ../../environments/rococo-westend/bridge_hub_westend_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send wROC back to Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run ../../environments/rococo-westend/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 120 seconds +# send 3 wROC back to Alice from Westend AH to Rococo AH +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local 3000000000000" within 120 seconds -# check that //Alice received the wROC on Rococo AH +# check that //Alice received at least 2.8 wROC on Rococo AH # (we wait until //Alice account increases here - there are no other transactions that may increase it) -asset-hub-rococo-collator1: js-script ../../js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl index 3acded97d5cc9cd0c2177f47fae7d362058eb85a..1a8a161819542e281094aed0681d52167aaea8e6 100644 --- a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl @@ -1,10 +1,10 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back -Network: ../../environments/rococo-westend/bridge_hub_westend_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send wWND back to Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run ../../environments/rococo-westend/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 120 seconds +# send 3 wWND back to Alice from Rococo AH to Westend AH +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local 3000000000000" within 120 seconds -# check that //Alice received the wWND on Westend AH +# check that //Alice received at least 2.8 wWND on Westend AH # (we wait until //Alice account increases here - there are no other transactions that may increase it) -asset-hub-westend-collator1: js-script ../../js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl index 82a1a103b14a7a175dff9f352517efc9269d7e36..6e381f5377329430c0d7a8723f9ea9081556bfeb 100644 --- 
a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl +++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl @@ -1,8 +1,8 @@ Description: While relayer is idle, we only sync mandatory Rococo (and a single Rococo BH) headers to Westend BH. -Network: ../../environments/rococo-westend/bridge_hub_westend_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config # ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were # generated while relay was offline and those in the next 100 seconds while script is active. -bridge-hub-westend-collator1: js-script ../../js-helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh index 423f4a1bcc0f2a2cceb06c4d1855b168063ec990..7d5b8d9273664b0861e8ffe1c528e9e1718c4df4 100755 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh +++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh @@ -2,19 +2,19 @@ set -e -source "${BASH_SOURCE%/*}/../../utils/common.sh" -source "${BASH_SOURCE%/*}/../../utils/zombienet.sh" +source "${BASH_SOURCE%/*}/../../framework/utils/common.sh" +source "${BASH_SOURCE%/*}/../../framework/utils/zombienet.sh" -# We use `--relayer-delay` in order to sleep some time before starting relayer. -# We want to sleep for at least 1 session, which is expected to be 60 seconds for test environment. -${BASH_SOURCE%/*}/../../environments/rococo-westend/spawn.sh & +export ENV_PATH=`realpath ${BASH_SOURCE%/*}/../../environments/rococo-westend` + +$ENV_PATH/spawn.sh & env_pid=$! -ensure_process_file $env_pid $TEST_DIR/rococo.env 400 +ensure_process_file $env_pid $TEST_DIR/rococo.env 600 rococo_dir=`cat $TEST_DIR/rococo.env` echo -ensure_process_file $env_pid $TEST_DIR/westend.env 180 +ensure_process_file $env_pid $TEST_DIR/westend.env 300 westend_dir=`cat $TEST_DIR/westend.env` echo diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl index 865813246252ae7911addd603903b6b2a43bab73..b4b3e43679162feb8c3c5253f3f963d950f31d55 100644 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl +++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl @@ -1,7 +1,7 @@ Description: While relayer is idle, we only sync mandatory Westend (and a single Westend BH) headers to Rococo BH. -Network: ../../environments/rococo-westend/bridge_hub_rococo_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml Creds: config # ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were # generated while relay was offline and those in the next 100 seconds while script is active. 
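# (the "westend-at-rococo" argument below names the bridged-chain descriptor in
# js-helpers/chains/westend-at-rococo.js, which this change moves under framework/)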
-bridge-hub-rococo-collator1: js-script ../../js-helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs index 1807b8a1718e8b5c800b3bf27b58e0f39cd2948a..a7b2eb19de88a5c585ec3f6dfe5ad46ef0399b88 100644 --- a/cumulus/client/cli/src/lib.rs +++ b/cumulus/client/cli/src/lib.rs @@ -30,7 +30,7 @@ use codec::Encode; use sc_chain_spec::ChainSpec; use sc_client_api::HeaderBackend; use sc_service::{ - config::{PrometheusConfig, TelemetryEndpoints}, + config::{PrometheusConfig, RpcBatchRequestConfig, TelemetryEndpoints}, BasePath, TransactionPoolOptions, }; use sp_core::hexdisplay::HexDisplay; @@ -443,6 +443,14 @@ impl sc_cli::CliConfiguration for NormalizedRunCmd { Ok(self.base.rpc_max_subscriptions_per_connection) } + fn rpc_buffer_capacity_per_connection(&self) -> sc_cli::Result { + Ok(self.base.rpc_message_buffer_capacity_per_connection) + } + + fn rpc_batch_config(&self) -> sc_cli::Result { + self.base.rpc_batch_config() + } + fn transaction_pool(&self, is_dev: bool) -> sc_cli::Result { self.base.transaction_pool(is_dev) } diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 8740b06005d65d53747dacdb6499370962ffe0a4..52b83254951f0e0ba0fd9ad5420d7faca2402066 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -33,12 +33,12 @@ use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::CollationResult; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Id as ParaId}; +use polkadot_primitives::{CollatorPair, Id as ParaId, ValidationCode}; use futures::{channel::mpsc::Receiver, prelude::*}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::BlockImport; -use sp_api::ProvideRuntimeApi; +use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; use sp_consensus::SyncOracle; @@ -47,6 +47,7 @@ use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_state_machine::Backend as _; use std::{convert::TryFrom, sync::Arc, time::Duration}; use crate::collator as collator_util; @@ -100,6 +101,7 @@ where + AuxStore + HeaderBackend + BlockBackend + + CallApiAt + Send + Sync + 'static, @@ -172,6 +174,22 @@ where continue } + let Ok(Some(code)) = + params.para_client.state_at(parent_hash).map_err(drop).and_then(|s| { + s.storage(&sp_core::storage::well_known_keys::CODE).map_err(drop) + }) + else { + continue; + }; + + super::check_validation_code_or_log( + &ValidationCode::from(code).hash(), + params.para_id, + ¶ms.relay_client, + *request.relay_parent(), + ) + .await; + let relay_parent_header = match params.relay_client.header(RBlockId::hash(*request.relay_parent())).await { Err(e) => reject_with_error!(e), diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index a9f33173d832ab96edd0ada3f123647fb9a967d6..161f10d55a193de35a2585e1a1f5725f30e19bf7 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ 
b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -290,10 +290,7 @@ where // If the longest chain has space, build upon that. Otherwise, don't // build at all. potential_parents.sort_by_key(|a| a.depth); - let initial_parent = match potential_parents.pop() { - None => continue, - Some(p) => p, - }; + let Some(initial_parent) = potential_parents.pop() else { continue }; // Build in a loop until not allowed. Note that the authorities can change // at any block, so we need to re-claim our slot every time. @@ -301,6 +298,10 @@ where let mut parent_header = initial_parent.header; let overseer_handle = &mut params.overseer_handle; + // We mainly call this to inform users at genesis if there is a mismatch with the + // on-chain data. + collator.collator_service().check_block_status(parent_hash, &parent_header); + // This needs to change to support elastic scaling, but for continuously // scheduled chains this ensures that the backlog will grow steadily. for n_built in 0..2 { @@ -353,6 +354,14 @@ where Some(v) => v, }; + super::check_validation_code_or_log( + &validation_code_hash, + params.para_id, + ¶ms.relay_client, + relay_parent, + ) + .await; + match collator .collate( &parent_header, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 4c7b759daf736f69de48b586b082a7d01534d7e3..6e0067d0cedb602face8943737f99f3cb1a201a3 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -20,5 +20,60 @@ //! included parachain block, as well as the [`lookahead`] collator, which prospectively //! builds on parachain blocks which have not yet been included in the relay chain. +use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_primitives::{ + Hash as RHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, +}; + pub mod basic; pub mod lookahead; + +/// Check the `local_validation_code_hash` against the validation code hash in the relay chain +/// state. +/// +/// If the code hashes do not match, it prints a warning. 
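+///
+/// Collators are expected to call this right before building on a parent block,
+/// passing the hash of the validation code they are about to execute with. A minimal
+/// call-site sketch, mirroring the `basic` and `lookahead` collators in this change:
+///
+/// ```ignore
+/// check_validation_code_or_log(&validation_code_hash, para_id, &relay_client, relay_parent)
+///     .await;
+/// ```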
+async fn check_validation_code_or_log( + local_validation_code_hash: &ValidationCodeHash, + para_id: ParaId, + relay_client: &impl RelayChainInterface, + relay_parent: RHash, +) { + let state_validation_code_hash = match relay_client + .validation_code_hash(relay_parent, para_id, OccupiedCoreAssumption::Included) + .await + { + Ok(hash) => hash, + Err(error) => { + tracing::debug!( + target: super::LOG_TARGET, + %error, + ?relay_parent, + %para_id, + "Failed to fetch validation code hash", + ); + return + }, + }; + + match state_validation_code_hash { + Some(state) => + if state != *local_validation_code_hash { + tracing::warn!( + target: super::LOG_TARGET, + %para_id, + ?relay_parent, + ?local_validation_code_hash, + relay_validation_code_hash = ?state, + "Parachain code doesn't match validation code stored in the relay chain state", + ); + }, + None => { + tracing::warn!( + target: super::LOG_TARGET, + %para_id, + ?relay_parent, + "Could not find validation code for parachain in the relay chain state.", + ); + }, + } +} diff --git a/cumulus/client/consensus/aura/src/lib.rs b/cumulus/client/consensus/aura/src/lib.rs index 8e4bc658e44bfdc173f3fdb66518da883edcb05b..ed6f5bdd4d6984350c5f59a3753618c3a038f323 100644 --- a/cumulus/client/consensus/aura/src/lib.rs +++ b/cumulus/client/consensus/aura/src/lib.rs @@ -54,7 +54,10 @@ use std::{ mod import_queue; pub use import_queue::{build_verifier, import_queue, BuildVerifierParams, ImportQueueParams}; -pub use sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, SlotProportion}; +pub use sc_consensus_aura::{ + slot_duration, standalone::slot_duration_at, AuraVerifier, BuildAuraWorkerParams, + SlotProportion, +}; pub use sc_consensus_slots::InherentDataProviderExt; pub mod collator; diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 597d1ab2acc2cff42d3230898c1129a7ba63b6f3..bfb95ae388ae3cd31f5035a9c6195631adbb8809 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -136,6 +136,15 @@ impl RelayChainInterface for Relaychain { Ok(Some(PersistedValidationData { parent_head, ..Default::default() })) } + async fn validation_code_hash( + &self, + _: PHash, + _: ParaId, + _: OccupiedCoreAssumption, + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + async fn candidate_pending_availability( &self, _: PHash, diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index 8a559c603f33c138912cfc096e2d90cc879b597e..b37232bb4485d6f5ece63a3c940bd065c1d3f083 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] anyhow = "1.0" async-trait = "0.1.74" -thiserror = "1.0.48" +thiserror = { workspace = true } # Substrate sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index e03f470753bb6c32c4410a7c694dea8312c74c31..d986635f961c914c9ec6fa970b20170f9f8b9cbc 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -117,6 +117,15 @@ impl RelayChainInterface for DummyRelayChainInterface { })) } + async fn validation_code_hash( + &self, + _: PHash, + _: ParaId, + _: OccupiedCoreAssumption, + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + async fn candidate_pending_availability( &self, _: PHash, 
diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 57353638e197acf255b73080c6802e94647c2090..051eb6764c8ce97a8305a07bdddb8f06f61fe4bc 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -159,7 +159,7 @@ impl ParachainInherentDataProvider { target: LOG_TARGET, relay_parent = ?relay_parent, error = ?e, - "An error occured during requesting the downward messages.", + "An error occurred during requesting the downward messages.", ); }) .ok()?; @@ -171,7 +171,7 @@ impl ParachainInherentDataProvider { target: LOG_TARGET, relay_parent = ?relay_parent, error = ?e, - "An error occured during requesting the inbound HRMP messages.", + "An error occurred during requesting the inbound HRMP messages.", ); }) .ok()?; diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 866214fe2c526d77153afbea74b130aa1fda8636..6ea02b2e7c1f6d9b5313459890dd2147015359e5 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -21,7 +21,7 @@ use cumulus_primitives_core::{ relay_chain::{ runtime_api::ParachainHost, Block as PBlock, BlockId, CommittedCandidateReceipt, Hash as PHash, Header as PHeader, InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, - ValidatorId, + ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -115,6 +115,19 @@ impl RelayChainInterface for RelayChainInProcessInterface { )?) } + async fn validation_code_hash( + &self, + hash: PHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> RelayChainResult> { + Ok(self.full_client.runtime_api().validation_code_hash( + hash, + para_id, + occupied_core_assumption, + )?) + } + async fn candidate_pending_availability( &self, hash: PHash, diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 5cdfff23ae17088abff69ac88dfc7224e579e157..6e652b892104e929e5e0a5bb7ce8cf33d364e8e6 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -21,6 +21,6 @@ sc-client-api = { path = "../../../substrate/client/api" } futures = "0.3.28" async-trait = "0.1.74" -thiserror = "1.0.48" +thiserror = { workspace = true } jsonrpsee-core = "0.22" parity-scale-codec = "3.6.4" diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index de5e7891b30da0d5eaf67c5acc80be48944c1399..bb93e6a168c849fa9d41586ad8b9ec0013e6c01f 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -30,7 +30,7 @@ use cumulus_primitives_core::relay_chain::BlockId; pub use cumulus_primitives_core::{ relay_chain::{ CommittedCandidateReceipt, Hash as PHash, Header as PHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidatorId, + OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -194,6 +194,15 @@ pub trait RelayChainInterface: Send + Sync { relay_parent: PHash, relevant_keys: &Vec>, ) -> RelayChainResult; + + /// Returns the validation code hash for the given `para_id` using the given + /// `occupied_core_assumption`. 
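+    ///
+    /// For example, the collator-side check added in this change queries (sketch):
+    ///
+    /// ```ignore
+    /// let hash = relay_client
+    ///     .validation_code_hash(relay_parent, para_id, OccupiedCoreAssumption::Included)
+    ///     .await?;
+    /// ```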
+ async fn validation_code_hash( + &self, + relay_parent: PHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> RelayChainResult>; } #[async_trait] @@ -301,4 +310,15 @@ where async fn header(&self, block_id: BlockId) -> RelayChainResult> { (**self).header(block_id).await } + + async fn validation_code_hash( + &self, + relay_parent: PHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> RelayChainResult> { + (**self) + .validation_code_hash(relay_parent, para_id, occupied_core_assumption) + .await + } } diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 0b1e001e007a493f521f10c2659653699da04d0c..98240c92adab38ea103f14699e1e49f7449b3d24 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -24,6 +24,7 @@ polkadot-node-collation-generation = { path = "../../../polkadot/node/collation- polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } polkadot-node-core-chain-api = { path = "../../../polkadot/node/core/chain-api" } polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } +polkadot-service = { path = "../../../polkadot/node/service" } # substrate deps sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 5f5bf338ef9907756adb1eab3f0541e870677fe5..f01ef8b05ecba77cf6458fdfe80e966b3a67e6a3 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -15,159 +15,29 @@ // along with Polkadot. If not, see . 
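// Overseer construction for the minimal (collator-only) relay-chain node. The bespoke
// subsystem wiring that used to live here is replaced by
// `polkadot_service::overseer::collator_overseer_builder`; in sketch form, the new
// construction is:
//
//     let builder = collator_overseer_builder(args)?;
//     let (overseer, handle) = builder.build_with_connector(connector)?;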
use futures::{select, StreamExt}; -use parking_lot::Mutex; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; -use polkadot_availability_recovery::AvailabilityRecoverySubsystem; -use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide}; -use polkadot_network_bridge::{ - Metrics as NetworkBridgeMetrics, NetworkBridgeRx as NetworkBridgeRxSubsystem, - NetworkBridgeTx as NetworkBridgeTxSubsystem, -}; -use polkadot_node_collation_generation::CollationGenerationSubsystem; -use polkadot_node_core_chain_api::ChainApiSubsystem; -use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; -use polkadot_node_core_runtime_api::RuntimeApiSubsystem; -use polkadot_node_network_protocol::{ - peer_set::{PeerSet, PeerSetProtocolNames}, - request_response::{ - v1::{self, AvailableDataFetchingRequest}, - v2, IncomingRequestReceiver, ReqProtocolNames, - }, -}; -use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics}; use polkadot_overseer::{ - BlockInfo, DummySubsystem, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, - UnpinHandle, + BlockInfo, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, UnpinHandle, }; -use polkadot_primitives::CollatorPair; +use polkadot_service::overseer::{collator_overseer_builder, OverseerGenArgs}; -use sc_authority_discovery::Service as AuthorityDiscoveryService; -use sc_network::{NetworkStateInfo, NotificationService}; use sc_service::TaskManager; use sc_utils::mpsc::tracing_unbounded; -use cumulus_primitives_core::relay_chain::{Block, Hash as PHash}; use cumulus_relay_chain_interface::RelayChainError; use crate::BlockChainRpcClient; -/// Arguments passed for overseer construction. -pub(crate) struct CollatorOverseerGenArgs<'a> { - /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. - pub runtime_client: Arc, - /// Underlying network service implementation. - pub network_service: Arc>, - /// Syncing oracle. - pub sync_oracle: Box, - /// Underlying authority discovery service. - pub authority_discovery_service: AuthorityDiscoveryService, - /// Receiver for collation request protocol v1. - pub collation_req_receiver_v1: IncomingRequestReceiver, - /// Receiver for collation request protocol v2. - pub collation_req_receiver_v2: IncomingRequestReceiver, - /// Receiver for availability request protocol - pub available_data_req_receiver: IncomingRequestReceiver, - /// Prometheus registry, commonly used for production systems, less so for test. - pub registry: Option<&'a Registry>, - /// Task spawner to be used throughout the overseer and the APIs it provides. - pub spawner: sc_service::SpawnTaskHandle, - /// Determines the behavior of the collator. - pub collator_pair: CollatorPair, - /// Request response protocols - pub req_protocol_names: ReqProtocolNames, - /// Peerset protocols name mapping - pub peer_set_protocol_names: PeerSetProtocolNames, - /// Notification services for validation/collation protocols. 
- pub notification_services: HashMap>, -} - fn build_overseer( connector: OverseerConnector, - CollatorOverseerGenArgs { - runtime_client, - network_service, - sync_oracle, - authority_discovery_service, - collation_req_receiver_v1, - collation_req_receiver_v2, - available_data_req_receiver, - registry, - spawner, - collator_pair, - req_protocol_names, - peer_set_protocol_names, - notification_services, - }: CollatorOverseerGenArgs<'_>, + args: OverseerGenArgs, ) -> Result< (Overseer, Arc>, OverseerHandle), RelayChainError, > { - let spawner = SpawnGlue(spawner); - let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let notification_sinks = Arc::new(Mutex::new(HashMap::new())); - - let builder = Overseer::builder() - .availability_distribution(DummySubsystem) - .availability_recovery(AvailabilityRecoverySubsystem::for_collator( - available_data_req_receiver, - Metrics::register(registry)?, - )) - .availability_store(DummySubsystem) - .bitfield_distribution(DummySubsystem) - .bitfield_signing(DummySubsystem) - .candidate_backing(DummySubsystem) - .candidate_validation(DummySubsystem) - .pvf_checker(DummySubsystem) - .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) - .collator_protocol({ - let side = ProtocolSide::Collator { - peer_id: network_service.local_peer_id(), - collator_pair, - request_receiver_v1: collation_req_receiver_v1, - request_receiver_v2: collation_req_receiver_v2, - metrics: Metrics::register(registry)?, - }; - CollatorProtocolSubsystem::new(side) - }) - .network_bridge_rx(NetworkBridgeRxSubsystem::new( - network_service.clone(), - authority_discovery_service.clone(), - sync_oracle, - network_bridge_metrics.clone(), - peer_set_protocol_names.clone(), - notification_services, - notification_sinks.clone(), - )) - .network_bridge_tx(NetworkBridgeTxSubsystem::new( - network_service, - authority_discovery_service, - network_bridge_metrics, - req_protocol_names, - peer_set_protocol_names, - notification_sinks, - )) - .provisioner(DummySubsystem) - .runtime_api(RuntimeApiSubsystem::new( - runtime_client.clone(), - Metrics::register(registry)?, - spawner.clone(), - )) - .statement_distribution(DummySubsystem) - .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) - .approval_distribution(DummySubsystem) - .approval_voting(DummySubsystem) - .gossip_support(DummySubsystem) - .dispute_coordinator(DummySubsystem) - .dispute_distribution(DummySubsystem) - .chain_selection(DummySubsystem) - .activation_external_listeners(Default::default()) - .span_per_active_leaf(Default::default()) - .active_leaves(Default::default()) - .supports_parachains(runtime_client) - .metrics(Metrics::register(registry)?) 
- .spawner(spawner); + let builder = + collator_overseer_builder(args).map_err(|e| RelayChainError::Application(e.into()))?; builder .build_with_connector(connector) @@ -175,7 +45,7 @@ fn build_overseer( } pub(crate) fn spawn_overseer( - overseer_args: CollatorOverseerGenArgs, + overseer_args: OverseerGenArgs, task_manager: &TaskManager, relay_chain_rpc_client: Arc, ) -> Result { diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index d121d2d3356765d9327fdaa0a8c0563c3917266f..4bccca59fe3ea2eec816513778885f43c9504d51 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use collator_overseer::{CollatorOverseerGenArgs, NewMinimalNode}; +use collator_overseer::NewMinimalNode; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; use cumulus_relay_chain_rpc_interface::{RelayChainRpcClient, RelayChainRpcInterface, Url}; @@ -29,6 +29,7 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem_util::metrics::prometheus::Registry; use polkadot_primitives::CollatorPair; +use polkadot_service::{overseer::OverseerGenArgs, IsParachainNode}; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_network::{config::FullNetworkConfiguration, Event, NetworkEventStream, NetworkService}; @@ -172,10 +173,10 @@ async fn new_minimal_relay_chain( } let genesis_hash = relay_chain_rpc_client.block_get_hash(Some(0)).await?.unwrap_or_default(); - let peer_set_protocol_names = + let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - let notification_services = peer_sets_info(is_authority, &peer_set_protocol_names) + let notification_services = peer_sets_info(is_authority, &peerset_protocol_names) .into_iter() .map(|(config, (peerset, service))| { net_config.add_notification_protocol(config); @@ -184,14 +185,14 @@ async fn new_minimal_relay_chain( .collect::>>(); let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = + let (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) = build_request_response_protocol_receivers(&request_protocol_names, &mut net_config); let best_header = relay_chain_rpc_client .chain_get_header(None) .await? 
.ok_or_else(|| RelayChainError::RpcCallError("Unable to fetch best header".to_string()))?; - let (network, network_starter, sync_oracle) = build_collator_network( + let (network, network_starter, sync_service) = build_collator_network( &config, net_config, task_manager.spawn_handle(), @@ -208,19 +209,20 @@ async fn new_minimal_relay_chain( prometheus_registry.cloned(), ); - let overseer_args = CollatorOverseerGenArgs { + let overseer_args = OverseerGenArgs { runtime_client: relay_chain_rpc_client.clone(), network_service: network, - sync_oracle, + sync_service, authority_discovery_service, - collation_req_receiver_v1, - collation_req_receiver_v2, + collation_req_v1_receiver, + collation_req_v2_receiver, available_data_req_receiver, registry: prometheus_registry, spawner: task_manager.spawn_handle(), - collator_pair, + is_parachain_node: IsParachainNode::Collator(collator_pair), + overseer_message_channel_capacity_override: None, req_protocol_names: request_protocol_names, - peer_set_protocol_names, + peerset_protocol_names, notification_services, }; @@ -240,10 +242,10 @@ fn build_request_response_protocol_receivers( IncomingRequestReceiver, IncomingRequestReceiver, ) { - let (collation_req_receiver_v1, cfg) = + let (collation_req_v1_receiver, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); - let (collation_req_receiver_v2, cfg) = + let (collation_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -251,5 +253,5 @@ fn build_request_response_protocol_receivers( config.add_request_response_protocol(cfg); let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names); config.add_request_response_protocol(cfg); - (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) + (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) } diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs index 95785063c1aeb6649d7154fa39e4e111e226def3..7286fab7907cb6d475b81a4dfd97c5d001180873 100644 --- a/cumulus/client/relay-chain-minimal-node/src/network.rs +++ b/cumulus/client/relay-chain-minimal-node/src/network.rs @@ -40,7 +40,11 @@ pub(crate) fn build_collator_network( genesis_hash: Hash, best_header: Header, ) -> Result< - (Arc>, NetworkStarter, Box), + ( + Arc>, + NetworkStarter, + Arc, + ), Error, > { let protocol_id = config.protocol_id(); @@ -112,7 +116,7 @@ pub(crate) fn build_collator_network( let network_starter = NetworkStarter::new(network_start_tx); - Ok((network_service, network_starter, Box::new(SyncOracle {}))) + Ok((network_service, network_starter, Arc::new(SyncOracle {}))) } fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) { diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 1534537e0d428d9cf547f7b39ecb7032f8847c87..801712b1ad150a8a63e85c40ec0854a62b5969bc 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -37,12 +37,12 @@ jsonrpsee = { version = "0.22", features = ["ws-client"] } tracing = "0.1.37" async-trait = "0.1.74" url = "2.4.0" -serde_json = "1.0.113" -serde = "1.0.196" +serde_json = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } 
schnellru = "0.2.1" smoldot = { version = "0.11.0", default_features = false, features = ["std"] } smoldot-light = { version = "0.9.0", default_features = false, features = ["std"] } either = "1.8.1" -thiserror = "1.0.48" +thiserror = { workspace = true } rand = "0.8.5" pin-project = "1.1.3" diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 96f8fc8b5563335eb7796bc8fd105fced15bc5e1..3a4c186e301eab295aa3befbd3c5549636fdb2c0 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -19,7 +19,7 @@ use core::time::Duration; use cumulus_primitives_core::{ relay_chain::{ CommittedCandidateReceipt, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidatorId, + OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -110,6 +110,17 @@ impl RelayChainInterface for RelayChainRpcInterface { .await } + async fn validation_code_hash( + &self, + hash: RelayHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> RelayChainResult> { + self.rpc_client + .validation_code_hash(hash, para_id, occupied_core_assumption) + .await + } + async fn candidate_pending_availability( &self, hash: RelayHash, diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index a912997e947d19a4c1063c36b0f03dcf32bf12f1..6578210a259c956b1c817f579f66e84258a8ab3e 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -647,6 +647,20 @@ impl RelayChainRpcClient { .await } + pub async fn validation_code_hash( + &self, + at: RelayHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> Result, RelayChainError> { + self.call_remote_runtime_function( + "ParachainHost_validation_code_hash", + at, + Some((para_id, occupied_core_assumption)), + ) + .await + } + fn send_register_message_to_worker( &self, message: RpcDispatcherMessage, diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs index 34a41557152d8df38e0abc2d612151b2f74dc779..31b571816a0c15837c6175ab233204565cc7eb36 100644 --- a/cumulus/pallets/aura-ext/src/lib.rs +++ b/cumulus/pallets/aura-ext/src/lib.rs @@ -117,12 +117,6 @@ pub mod pallet { impl BuildGenesisConfig for GenesisConfig { fn build(&self) { let authorities = Aura::::authorities(); - - assert!( - !authorities.is_empty(), - "AuRa authorities empty, maybe wrong order in `construct_runtime!`?", - ); - Authorities::::put(authorities); } } diff --git a/cumulus/pallets/collator-selection/src/benchmarking.rs b/cumulus/pallets/collator-selection/src/benchmarking.rs index 2c40f4dd0eac4a8a2ad3aed7f162a4379ccdbc93..e2af74a6e60ef00ca9d1111518d86df3fdfb0656 100644 --- a/cumulus/pallets/collator-selection/src/benchmarking.rs +++ b/cumulus/pallets/collator-selection/src/benchmarking.rs @@ -22,9 +22,7 @@ use super::*; #[allow(unused)] use crate::Pallet as CollatorSelection; use codec::Decode; -use frame_benchmarking::{ - account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, -}; +use frame_benchmarking::{account, v2::*, whitelisted_caller, BenchmarkError}; use frame_support::traits::{Currency, EnsureOrigin, Get, ReservableCurrency}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, 
RawOrigin}; use pallet_authorship::EventHandler; diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 7449f4d68c7eacc8b07fa45f2f991c13f3d5713b..62c00737f91a4aac25f2e1972a0c5aade5e0eed2 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -118,7 +118,7 @@ pub mod pallet { use sp_staking::SessionIndex; use sp_std::vec::Vec; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); type BalanceOf = @@ -495,7 +495,11 @@ pub mod pallet { }) .unwrap_or_default(); Self::deposit_event(Event::NewCandidacyBond { bond_amount: bond }); - Ok(Some(T::WeightInfo::set_candidacy_bond(initial_len as u32, kicked as u32)).into()) + Ok(Some(T::WeightInfo::set_candidacy_bond( + bond_increased.then(|| initial_len as u32).unwrap_or_default(), + kicked as u32, + )) + .into()) } /// Register this account as a collator candidate. The account must (a) already have diff --git a/cumulus/pallets/collator-selection/src/migration.rs b/cumulus/pallets/collator-selection/src/migration.rs index 58b4cc5b06a1ab7da17b31ad3c457fd992c59136..f384981dbae8e034a4465ac0f226fb4422be8089 100644 --- a/cumulus/pallets/collator-selection/src/migration.rs +++ b/cumulus/pallets/collator-selection/src/migration.rs @@ -31,8 +31,8 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 { + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 0 { let invulnerables_len = Invulnerables::::get().to_vec().len(); >::mutate(|invulnerables| { invulnerables.sort(); @@ -45,7 +45,7 @@ pub mod v1 { invulnerables_len, ); // Similar complexity to `set_invulnerables` (put storage value) - // Plus 1 read for length, 1 read for `onchain_version`, 1 write to put version + // Plus 1 read for length, 1 read for `on_chain_version`, 1 write to put version T::WeightInfo::set_invulnerables(invulnerables_len as u32) .saturating_add(T::DbWeight::get().reads_writes(2, 1)) } else { @@ -83,8 +83,8 @@ pub mod v1 { "after migration, there should be the same number of invulnerables" ); - let onchain_version = Pallet::::on_chain_storage_version(); - frame_support::ensure!(onchain_version >= 1, "must_upgrade"); + let on_chain_version = Pallet::::on_chain_storage_version(); + frame_support::ensure!(on_chain_version >= 1, "must_upgrade"); Ok(()) } diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 86647290563cb0379f1acce5090e3fc79da34f1e..7e0442f0b5856fa5153e29ff497cfee876c067ec 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -36,6 +36,7 @@ sp-version = { path = "../../../substrate/primitives/version", default-features # Polkadot polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = ["wasm-api"] } polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } +polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false, optional = true } xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus @@ -79,6 +80,7 @@ std = [ "log/std", "pallet-message-queue/std", 
"polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", "scale-info/std", "sp-core/std", @@ -102,6 +104,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] @@ -110,6 +113,7 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-message-queue/try-runtime", + "polkadot-runtime-common?/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index 5173745273981d41b3cfca429091b004653e835c..0a90c30e0331261026125f429efe70eff07ac069 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -13,9 +13,9 @@ workspace = true proc-macro = true [dependencies] -syn = "2.0.49" +syn = { workspace = true } proc-macro2 = "1.0.64" -quote = "1.0.33" +quote = { workspace = true } proc-macro-crate = "3.0.0" [features] diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 5a0fa57fb171c60f13b87d70305a6d58dad475a1..81965a2a313875fd8783cf9e22ca3dd652f688c3 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -205,6 +205,7 @@ pub mod pallet { type OnSystemEvent: OnSystemEvent; /// Returns the parachain ID we are running with. + #[pallet::constant] type SelfParaId: Get; /// The place where outbound XCMP messages come from. This is queried in `finalize_block`. @@ -840,6 +841,7 @@ pub mod pallet { /// /// This data is also absent from the genesis. #[pallet::storage] + #[pallet::disable_try_decode_storage] #[pallet::getter(fn host_configuration)] pub(super) type HostConfiguration = StorageValue<_, AbridgedHostConfiguration>; @@ -1610,6 +1612,15 @@ impl UpwardMessageSender for Pallet { } } +#[cfg(feature = "runtime-benchmarks")] +impl polkadot_runtime_common::xcm_sender::EnsureForParachain for Pallet { + fn ensure(para_id: ParaId) { + if let ChannelStatus::Closed = Self::get_channel_status(para_id) { + Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id) + } + } +} + /// Something that can check the inherents of a block. #[cfg_attr( feature = "parameterized-consensus-hook", diff --git a/cumulus/pallets/parachain-system/src/migration.rs b/cumulus/pallets/parachain-system/src/migration.rs index a92f85b9cd420e9e1027d1f82c643d90fea8b97c..30106aceab5a442c34360563fdfb096fdc6ac9b1 100644 --- a/cumulus/pallets/parachain-system/src/migration.rs +++ b/cumulus/pallets/parachain-system/src/migration.rs @@ -21,7 +21,7 @@ use frame_support::{ weights::Weight, }; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); /// Migrates the pallet storage to the most recent version. diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index ce3b724420f1c9e50b65a81074e023c6b735227b..ecab7a9a09311ae0f952a7842e9f1826b00adc69 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -16,7 +16,7 @@ //! 
The actual implementation of the validate block functionality. -use super::{trie_cache, MemoryOptimizedValidationParams}; +use super::{trie_cache, trie_recorder, MemoryOptimizedValidationParams}; use cumulus_primitives_core::{ relay_chain::Hash as RHash, ParachainBlockData, PersistedValidationData, }; @@ -34,12 +34,14 @@ use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; use sp_std::prelude::*; -use sp_trie::MemoryDB; +use sp_trie::{MemoryDB, ProofSizeProvider}; +use trie_recorder::SizeOnlyRecorderProvider; type TrieBackend = sp_state_machine::TrieBackend< MemoryDB>, HashingFor, trie_cache::CacheProvider>, + SizeOnlyRecorderProvider>, >; type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; @@ -48,6 +50,9 @@ fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } +// Recorder instance to be used during this validate_block call. +environmental::environmental!(recorder: trait ProofSizeProvider); + /// Validate the given parachain block. /// /// This function is doing roughly the following: @@ -120,6 +125,7 @@ where sp_std::mem::drop(storage_proof); + let mut recorder = SizeOnlyRecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! @@ -128,6 +134,7 @@ where *parent_header.state_root(), cache_provider, ) + .with_recorder(recorder.clone()) .build(); let _guard = ( @@ -167,9 +174,11 @@ where .replace_implementation(host_default_child_storage_next_key), sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), + cumulus_primitives_proof_size_hostfunction::storage_proof_size::host_storage_proof_size + .replace_implementation(host_storage_proof_size), ); - run_with_externalities::(&backend, || { + run_with_externalities_and_recorder::(&backend, &mut recorder, || { let relay_chain_proof = crate::RelayChainStateProof::new( PSC::SelfParaId::get(), inherent_data.validation_data.relay_parent_storage_root, @@ -190,7 +199,7 @@ where } }); - run_with_externalities::(&backend, || { + run_with_externalities_and_recorder::(&backend, &mut recorder, || { let head_data = HeadData(block.header().encode()); E::execute_block(block); @@ -265,15 +274,17 @@ fn validate_validation_data( ); } -/// Run the given closure with the externalities set. -fn run_with_externalities R>( +/// Run the given closure with the externalities and recorder set. 
+fn run_with_externalities_and_recorder R>( backend: &TrieBackend, + recorder: &mut SizeOnlyRecorderProvider>, execute: F, ) -> R { let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut ext = Ext::::new(&mut overlay, backend); + recorder.reset(); - set_and_run_with_externalities(&mut ext, || execute()) + recorder::using(recorder, || set_and_run_with_externalities(&mut ext, || execute())) } fn host_storage_read(key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { @@ -305,6 +316,10 @@ fn host_storage_clear(key: &[u8]) { with_externalities(|ext| ext.place_storage(key.to_vec(), None)) } +fn host_storage_proof_size() -> u64 { + recorder::with(|rec| rec.estimate_encoded_size()).expect("Recorder is always set; qed") as _ +} + fn host_storage_root(version: StateVersion) -> Vec { with_externalities(|ext| ext.storage_root(version)) } diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index f17ac6007a09bf02011473f95a537855eb43f3b0..a9fb65e11089fab87c543fbb6d7b54f6231a1dd7 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -59,7 +59,7 @@ fn call_validate_block( } fn create_test_client() -> (Client, Header) { - let client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().enable_import_proof_recording().build(); let genesis_header = client .header(client.chain_info().genesis_hash) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index e73aef70aa491fc68aad4f9479222d9a076e7edc..48310670c074d1be2ec86f582c0c84622abc086c 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -97,6 +97,7 @@ pub(crate) struct SizeOnlyRecorderProvider { } impl SizeOnlyRecorderProvider { + /// Create a new instance of [`SizeOnlyRecorderProvider`] pub fn new() -> Self { Self { seen_nodes: Default::default(), @@ -104,6 +105,13 @@ impl SizeOnlyRecorderProvider { recorded_keys: Default::default(), } } + + /// Reset the internal state. + pub fn reset(&self) { + self.seen_nodes.borrow_mut().clear(); + *self.encoded_size.borrow_mut() = 0; + self.recorded_keys.borrow_mut().clear(); + } } impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { @@ -281,6 +289,9 @@ mod tests { reference_recorder.estimate_encoded_size(), recorder_for_test.estimate_encoded_size() ); + + recorder_for_test.reset(); + assert_eq!(recorder_for_test.estimate_encoded_size(), 0) } } } diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 5b900769622afa3cba2c47f493ebff1a279b182b..e92169be16b0b8466582f3dd143d9e3e1807a42d 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -600,7 +600,7 @@ impl Pallet { let QueueConfigData { drop_threshold, .. } = >::get(); let fp = T::XcmpQueue::footprint(sender); // Assume that it will not fit into the current page: - let new_pages = fp.pages.saturating_add(1); + let new_pages = fp.ready_pages.saturating_add(1); if new_pages > drop_threshold { // This should not happen since the channel should have been suspended in // [`on_queue_changed`]. 
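The hunk above (and the matching `OnQueueChanged` hunk that follows) swaps `fp.pages` for `fp.ready_pages`, so pages that are already fully processed but not yet reaped no longer count against the XCMP channel thresholds; the mock's `footprint.ready_pages = footprint.pages` further down keeps the tests in step. A minimal standalone sketch of the drop-threshold check, using a hypothetical `Footprint` struct in place of the real `pallet-message-queue` footprint type:

```rust
/// Hypothetical stand-in for the message-queue footprint used above;
/// only the two fields the threshold logic reads are modelled.
struct Footprint {
    /// Total pages, including fully-processed ones awaiting reaping.
    pages: u32,
    /// Pages that still contain unprocessed messages.
    ready_pages: u32,
}

/// Sketch of the check above: assume the incoming fragment needs one
/// fresh page, and reject it only if that exceeds `drop_threshold`.
fn would_overflow(fp: &Footprint, drop_threshold: u32) -> bool {
    fp.ready_pages.saturating_add(1) > drop_threshold
}

fn main() {
    // Ten pages in total, but only two still hold unprocessed messages:
    // an old `pages`-based check would drop at a threshold of 5,
    // the `ready_pages`-based check does not.
    let fp = Footprint { pages: 10, ready_pages: 2 };
    assert!(fp.pages >= fp.ready_pages);
    assert!(!would_overflow(&fp, 5));
}
```

The suspend/resume hunk below applies the same substitution to `resume_threshold` and `suspend_threshold`, so a channel is only suspended while unprocessed pages actually pile up.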
@@ -663,12 +663,12 @@ impl OnQueueChanged for Pallet { let mut suspended_channels = >::get(); let suspended = suspended_channels.contains(¶); - if suspended && fp.pages <= resume_threshold { + if suspended && fp.ready_pages <= resume_threshold { Self::send_signal(para, ChannelSignal::Resume); suspended_channels.remove(¶); >::put(suspended_channels); - } else if !suspended && fp.pages >= suspend_threshold { + } else if !suspended && fp.ready_pages >= suspend_threshold { log::warn!("XCMP queue for sibling {:?} is full; suspending channel.", para); Self::send_signal(para, ChannelSignal::Suspend); diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index 6c86c3011d23807adfbde801ec6865b6731822df..c7fa61a3e3f0513e53d67fcee4efd6c3cecf29f6 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -24,7 +24,7 @@ use frame_support::{ weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, }; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); pub const LOG: &str = "runtime::xcmp-queue-migration"; diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 08ab58ce816046336fcba0a2318bd2ff77227e5f..1fb88cafd93c425833d38c0904f59490cc479294 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -247,6 +247,7 @@ impl> EnqueueMessage for EnqueueToLocalStorage } } footprint.pages = footprint.storage.size as u32 / 16; // Number does not matter + footprint.ready_pages = footprint.pages; footprint } } diff --git a/cumulus/parachain-template/pallets/template/README.md b/cumulus/parachain-template/pallets/template/README.md deleted file mode 100644 index 5a6461233465c327d233fc5d97bdb3a6a85f8fd9..0000000000000000000000000000000000000000 --- a/cumulus/parachain-template/pallets/template/README.md +++ /dev/null @@ -1 +0,0 @@ -License: Unlicense diff --git a/cumulus/parachain-template/pallets/template/src/benchmarking.rs b/cumulus/parachain-template/pallets/template/src/benchmarking.rs deleted file mode 100644 index 8bba2a09867dea1d55e487661c01fd72acdf4dc9..0000000000000000000000000000000000000000 --- a/cumulus/parachain-template/pallets/template/src/benchmarking.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Benchmarking setup for pallet-parachain-template - -use super::*; - -#[allow(unused)] -use crate::Pallet as Template; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; -use frame_system::RawOrigin; - -benchmarks! { - do_something { - let s in 0 .. 
100; - let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), s) - verify { - assert_eq!(Something::::get(), Some(s)); - } -} - -impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test,); diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index fba74b17f9607f58bdb17d7a0b05c8b764a9c4e5..654275eade81e5379004e649e11eb7e4acb2ec1f 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -25,7 +25,9 @@ "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", "/dns/mine14.rotko.net/tcp/33524/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", - "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu" + "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 685a00ddc7145ed650f7cb5496fe81cfb23f0ffb..454060d2a87afb84407323263eea97d2f12d5a68 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -25,7 +25,9 @@ "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", "/dns/mint14.rotko.net/tcp/33514/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", - "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW" + "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 6f42b5f7d8bb4a76c7390b538a68b1bb0acc1613..670935c9d2474242307d691d8707030c70f49982 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -23,7 +23,9 @@ "/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", "/dns/wmint14.rotko.net/tcp/33534/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", "/dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", - "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN" + "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", + "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30514/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk", + 
"/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 0ef81806cc5c08621f3c0a308425c72f836c3ef4..90b70b05016d52aeecaea4870f438bf980f7263b 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -25,7 +25,9 @@ "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", "/dns/kbr13.rotko.net/tcp/33553/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", - "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66" + "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index 130bdf31ef211ba6d353fa97944b2bf80320997d..a9444b89e1e2cce9c79a367c4c561e910f1f28f7 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -21,7 +21,9 @@ "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", "/dns/pbr13.rotko.net/tcp/33543/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", - "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw" + "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 018ab0ee6fd9810595c841237dd253b9463aed79..447207a58107a95bcd5fca173d9d5476a0ce9cab 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -19,7 +19,9 @@ "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", "/dns/wbr13.rotko.net/tcp/33563/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", "/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", - "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD" + "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", + "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30523/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", + 
"/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index e9f690234e4381f54377c7fe7174f25834a973a5..259669cf37a0643534eaa5d1776daf79ec402d75 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -25,7 +25,9 @@ "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH" + "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index ffe73b5a05a391cc1c5396c235460830c023297a..e459c631f8be9df7c5c52993c116f11ef619fefe 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -25,7 +25,9 @@ "/dns/westend-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", "/dns/wch13.rotko.net/tcp/33593/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", "/dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", - "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr" + "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30529/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn", + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index fa29853c70b05e47df50445935be42a4637f240d..29fa0c9cde79c0daecbce029c8fa377ba1ff6918 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -10,7 +10,9 @@ "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", - "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23" + "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", + 
"/dns/identity-westend.bootnodes.polkadotters.com/tcp/30532/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", + "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb" ], "telemetryEndpoints": null, "protocolId": null, @@ -79,4 +81,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/common/src/xcm_config.rs b/cumulus/parachains/common/src/xcm_config.rs index 15b090923d501b3769a18efe1916a146d4474f42..a9756af7aed245ea792d12addbbb2fff529eedaf 100644 --- a/cumulus/parachains/common/src/xcm_config.rs +++ b/cumulus/parachains/common/src/xcm_config.rs @@ -45,8 +45,6 @@ where >::AssetId, >::Balance, >, - AccountIdOf: - From + Into, { fn charge_weight_in_fungibles( asset_id: as Inspect< diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 8f586a46a75cb68a7ccdf62d2f23a1865dcf5d9c..c1e030466559e86f126640a3ee2a97e3fbf5a8a5 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -16,7 +16,8 @@ mod genesis; pub use genesis::{genesis, ED, PARA_ID_A, PARA_ID_B}; pub use penpal_runtime::xcm_config::{ - LocalTeleportableToAssetHub, LocalTeleportableToAssetHubV3, XcmConfig, + CustomizableAssetFromSystemAssetHub, LocalTeleportableToAssetHub, + LocalTeleportableToAssetHubV3, XcmConfig, }; // Substrate diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs index 7db9679f1c3e1554ce97ae2f057fbcd8c27183ca..55437645b0523b577c2e9d455952f5526ba9df0b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -78,7 +78,7 @@ pub fn genesis() -> Storage { }, babe: rococo_runtime::BabeConfig { authorities: Default::default(), - epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), + epoch_config: rococo_runtime::BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, sudo: rococo_runtime::SudoConfig { diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs index 578b307dd9339211de9cb793e4b4f63ffe9d3b15..700b80e63f6cf68e6095b7e02d84bb285ce720f9 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -94,7 +94,7 @@ pub fn genesis() -> Storage { }, babe: westend_runtime::BabeConfig { authorities: Default::default(), - epoch_config: Some(westend_runtime::BABE_GENESIS_EPOCH_CONFIG), + epoch_config: westend_runtime::BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, configuration: westend_runtime::ConfigurationConfig { config: get_host_config() }, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 4bbb4701e43918f5f34b250a23c4c483f2b2d4d1..c93484829fe8193c69a4d9475e81201e3607796c 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ 
b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -265,7 +265,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { $crate::impls::assert_expected_events!( Self, vec![ - // XCM is succesfully received and proccessed + // XCM is successfully received and processed [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { origin: $crate::impls::AggregateMessageOrigin::Ump($crate::impls::UmpQueueId::Para(id)), weight_used, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index a5957ee339306f580b7906b25d3adfff4a78deb1..ff069e2d3443ec13d8cb17a18c7127f48b6f34ef 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -18,6 +18,7 @@ use codec::{Decode, Encode}; use emulated_integration_tests_common::xcm_emulator::ConvertLocation; use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; +use rococo_system_emulated_network::penpal_emulated_chain::CustomizableAssetFromSystemAssetHub; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; use snowbridge_core::outbound::OperatingMode; use snowbridge_pallet_inbound_queue_fixtures::{ @@ -253,6 +254,16 @@ fn send_token_from_ethereum_to_penpal() { (PenpalASender::get(), INITIAL_FUND), ]); + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + CustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]).encode(), + )], + )); + }); + // The Weth asset location, identified by the contract address on Ethereum let weth_asset_location: Location = (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); diff --git a/cumulus/parachains/pallets/collective-content/src/benchmarking.rs b/cumulus/parachains/pallets/collective-content/src/benchmarking.rs index 943386a842766129c2b5d429dc3a9648249ea73f..3d6bf073778a294e39287285ddb8379d57d36c8f 100644 --- a/cumulus/parachains/pallets/collective-content/src/benchmarking.rs +++ b/cumulus/parachains/pallets/collective-content/src/benchmarking.rs @@ -16,7 +16,7 @@ //! The pallet benchmarks. use super::{Pallet as CollectiveContent, *}; -use frame_benchmarking::{impl_benchmark_test_suite, v2::*}; +use frame_benchmarking::v2::*; use frame_support::traits::EnsureOrigin; fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { diff --git a/cumulus/parachains/pallets/collective-content/src/lib.rs b/cumulus/parachains/pallets/collective-content/src/lib.rs index 7a685858accb67868837e734ba9808741a4f7ea1..b1c960ad6a0d337d6b84aaaba59f7fa0625a8124 100644 --- a/cumulus/parachains/pallets/collective-content/src/lib.rs +++ b/cumulus/parachains/pallets/collective-content/src/lib.rs @@ -59,7 +59,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::{traits::BadOrigin, Saturating}; - /// The current storage version. + /// The in-code storage version.
const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index ed893091533a2999ab3c3adffd96a6029baf64eb..17a12dd2f6f763f5d3bc22364b8f83fe3e401d44 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -383,8 +383,8 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v3::MultiLocation; - type AssetIdParameter = xcm::v3::MultiLocation; + type AssetId = xcm::v3::Location; + type AssetIdParameter = xcm::v3::Location; type Currency = Balances; type CreateOrigin = ForeignCreators< ( @@ -986,37 +986,37 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Multisig::on_chain_storage_version() == StorageVersion::new(0) { - Multisig::current_storage_version().put::(); + Multisig::in_code_storage_version().put::(); writes.saturating_inc(); } if Assets::on_chain_storage_version() == StorageVersion::new(0) { - Assets::current_storage_version().put::(); + Assets::in_code_storage_version().put::(); writes.saturating_inc(); } if Uniques::on_chain_storage_version() == StorageVersion::new(0) { - Uniques::current_storage_version().put::(); + Uniques::in_code_storage_version().put::(); writes.saturating_inc(); } if Nfts::on_chain_storage_version() == StorageVersion::new(0) { - Nfts::current_storage_version().put::(); + Nfts::in_code_storage_version().put::(); writes.saturating_inc(); } if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { - ForeignAssets::current_storage_version().put::(); + ForeignAssets::in_code_storage_version().put::(); writes.saturating_inc(); } if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { - PoolAssets::current_storage_version().put::(); + PoolAssets::in_code_storage_version().put::(); writes.saturating_inc(); } @@ -1043,6 +1043,7 @@ mod benches { [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_nft_fractionalization, NftFractionalization] [pallet_nfts, Nfts] @@ -1052,6 +1053,7 @@ mod benches { [pallet_utility, Utility] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToWestend] // XCM @@ -1091,7 +1093,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -1353,8 +1355,31 @@ impl_runtime_apis! { Config as XcmBridgeHubRouterConfig, }; + parameter_types! { + pub ExistentialDepositAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + pub const RandomParaId: ParaId = ParaId::new(43211234); + } + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -1363,7 +1388,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between AH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -1371,17 +1396,13 @@ impl_runtime_apis! { } fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { - // AH can reserve transfer native token to some random parachain. - let random_para_id = 43211234; - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - random_para_id.into() - ); Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, - ParentThen(Parachain(random_para_id).into()).into(), + // AH can reserve transfer native token to some random parachain. + ParentThen(Parachain(RandomParaId::get().into()).into()).into(), )) } @@ -1433,6 +1454,13 @@ impl_runtime_apis! { }); Some((assets, fee_index as u32, dest, verify)) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl XcmBridgeHubRouterConfig for Runtime { @@ -1467,13 +1495,6 @@ impl_runtime_apis! { use xcm_config::{TokenLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; - parameter_types! { - pub ExistentialDepositAsset: Option = Some(( - TokenLocation::get(), - ExistentialDeposit::get() - ).into()); - } - impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index f8820bbb58cb24afe1afe034e131414368089444..51b6543bae82ba496998706ab6c2aaf6e0ff604b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 25_003_000 picoseconds. - Weight::from_parts(25_800_000, 0) + // Minimum execution time: 22_136_000 picoseconds. + Weight::from_parts(22_518_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 88_832_000 picoseconds. - Weight::from_parts(90_491_000, 0) + // Minimum execution time: 92_277_000 picoseconds. + Weight::from_parts(94_843_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 138_911_000 picoseconds. - Weight::from_parts(142_483_000, 0) + // Minimum execution time: 120_110_000 picoseconds. + Weight::from_parts(122_968_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 146_932_000 picoseconds. - Weight::from_parts(153_200_000, 0) + // Minimum execution time: 143_116_000 picoseconds. + Weight::from_parts(147_355_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -170,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_081_000 picoseconds. - Weight::from_parts(7_397_000, 0) + // Minimum execution time: 6_517_000 picoseconds. + Weight::from_parts(6_756_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_007_000 picoseconds. - Weight::from_parts(2_183_000, 0) + // Minimum execution time: 1_894_000 picoseconds. + Weight::from_parts(2_024_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -208,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_790_000 picoseconds. - Weight::from_parts(29_767_000, 0) + // Minimum execution time: 27_314_000 picoseconds. + Weight::from_parts(28_787_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -234,8 +234,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_951_000 picoseconds. 
- Weight::from_parts(31_804_000, 0) + // Minimum execution time: 29_840_000 picoseconds. + Weight::from_parts(30_589_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -246,45 +246,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_164_000 picoseconds. - Weight::from_parts(2_311_000, 0) + // Minimum execution time: 1_893_000 picoseconds. + Weight::from_parts(2_017_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_906_000 picoseconds. - Weight::from_parts(17_612_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 19_211_000 picoseconds. + Weight::from_parts(19_552_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 17_443_000 picoseconds. - Weight::from_parts(18_032_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 19_177_000 picoseconds. + Weight::from_parts(19_704_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_992_000 picoseconds. - Weight::from_parts(19_464_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 20_449_000 picoseconds. + Weight::from_parts(21_075_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -304,36 +304,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 28_011_000 picoseconds. - Weight::from_parts(28_716_000, 0) + // Minimum execution time: 26_578_000 picoseconds. 
+ Weight::from_parts(27_545_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_533_000 picoseconds. - Weight::from_parts(9_856_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_646_000 picoseconds. + Weight::from_parts(11_944_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 17_628_000 picoseconds. - Weight::from_parts(18_146_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 19_301_000 picoseconds. + Weight::from_parts(19_664_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -349,12 +349,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_877_000 picoseconds. - Weight::from_parts(35_607_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 35_715_000 picoseconds. + Weight::from_parts(36_915_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -365,8 +365,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 5_370_000 picoseconds. - Weight::from_parts(5_616_000, 0) + // Minimum execution time: 4_871_000 picoseconds. 
+ Weight::from_parts(5_066_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -377,10 +377,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_820_000 picoseconds. - Weight::from_parts(27_143_000, 0) + // Minimum execution time: 25_150_000 picoseconds. + Weight::from_parts(26_119_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 38_248_000 picoseconds. + Weight::from_parts(39_122_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index cff2290222c35a07034d28c731cbe552130c8f4c..38c118dbb4b25fe2e345e9247a2a253d0becd613 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -22,13 +22,13 @@ use asset_hub_rococo_runtime::{ xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, TokenLocation, TokenLocationV3, TrustBackedAssetsPalletLocation, - TrustBackedAssetsPalletLocationV3, XcmConfig, + LocationToAccountId, StakingPot, TokenLocation, TokenLocationV3, + TrustBackedAssetsPalletLocation, TrustBackedAssetsPalletLocationV3, XcmConfig, }, AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, - MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, - ToWestendXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, + MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + SessionKeys, ToWestendXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, @@ -49,6 +49,7 @@ use frame_support::{ use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; use sp_consensus_aura::SlotDuration; use sp_runtime::traits::MaybeEquivalence; +use sp_std::ops::Mul; use std::convert::Into; use testnet_parachains_constants::rococo::{consensus::*, currency::UNITS, fee::WeightToFee}; use xcm::latest::prelude::{Assets as XcmAssets, *}; @@ -85,6 +86,52 @@ fn slot_durations() -> SlotDurations { } } +fn setup_pool_for_paying_fees_with_foreign_assets( + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( + AccountId, + xcm::v3::Location, + Balance, + ), +) { + let existential_deposit = ExistentialDeposit::get(); + + // setup a pool to pay fees with `foreign_asset_id_location` tokens + let pool_owner: AccountId = [14u8; 32].into(); + let native_asset = xcm::v3::Location::parent(); + let pool_liquidity: 
Balance =
+		existential_deposit.max(foreign_asset_id_minimum_balance).mul(100_000);
+
+	let _ = Balances::force_set_balance(
+		RuntimeOrigin::root(),
+		pool_owner.clone().into(),
+		(existential_deposit + pool_liquidity).mul(2).into(),
+	);
+
+	assert_ok!(ForeignAssets::mint(
+		RuntimeOrigin::signed(foreign_asset_owner),
+		foreign_asset_id_location.into(),
+		pool_owner.clone().into(),
+		(foreign_asset_id_minimum_balance + pool_liquidity).mul(2).into(),
+	));
+
+	assert_ok!(AssetConversion::create_pool(
+		RuntimeOrigin::signed(pool_owner.clone()),
+		Box::new(native_asset.into()),
+		Box::new(foreign_asset_id_location.into())
+	));
+
+	assert_ok!(AssetConversion::add_liquidity(
+		RuntimeOrigin::signed(pool_owner.clone()),
+		Box::new(native_asset.into()),
+		Box::new(foreign_asset_id_location.into()),
+		pool_liquidity,
+		pool_liquidity,
+		1,
+		1,
+		pool_owner,
+	));
+}
+
 #[test]
 fn test_buy_and_refund_weight_in_native() {
 	ExtBuilder::<Runtime>::default()
@@ -1056,7 +1103,8 @@ fn limited_reserve_transfer_assets_for_native_asset_over_bridge_works(
 mod asset_hub_rococo_tests {
 	use super::*;
-	use asset_hub_rococo_runtime::{PolkadotXcm, RuntimeOrigin};
+	use asset_hub_rococo_runtime::PolkadotXcm;
+	use xcm_executor::traits::ConvertLocation;
 
 	fn bridging_to_asset_hub_westend() -> TestBridgingConfig {
 		let _ = PolkadotXcm::force_xcm_version(
@@ -1081,27 +1129,135 @@ mod asset_hub_rococo_tests {
 	}
 
 	#[test]
-	fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_works() {
+	fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_fees_paid_by_pool_swap_works() {
 		const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32];
+		let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT);
+		let staking_pot = StakingPot::get();
+
+		let foreign_asset_id_location = xcm::v3::Location::new(
+			2,
+			[xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Westend)],
+		);
+		let foreign_asset_id_minimum_balance = 1_000_000_000;
+		// sovereign account as foreign asset owner (can be whoever for this scenario)
+		let foreign_asset_owner =
+			LocationToAccountId::convert_location(&Location::parent()).unwrap();
+		let foreign_asset_create_params =
+			(foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance);
+
+		asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::<
+			Runtime,
+			AllPalletsWithoutSystem,
+			XcmConfig,
+			ForeignAssetsInstance,
+		>(
+			collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)),
+			ExistentialDeposit::get(),
+			AccountId::from([73; 32]),
+			block_author_account,
+			// receiving WNDs
+			foreign_asset_create_params.clone(),
+			1000000000000,
+			|| {
+				// setup pool for paying fees to touch `SwapFirstAssetTrader`
+				setup_pool_for_paying_fees_with_foreign_assets(foreign_asset_create_params);
+				// staking pot account for collecting local native fees from `BuyExecution`
+				let _ = Balances::force_set_balance(RuntimeOrigin::root(), StakingPot::get().into(), ExistentialDeposit::get());
+				// prepare bridge configuration
+				bridging_to_asset_hub_westend()
+			},
+			(
+				[PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX)].into(),
+				GlobalConsensus(Westend),
+				[Parachain(1000)].into()
+			),
+			|| {
+				// the staking pot starts with exactly the existential deposit
+				assert_eq!(Balances::free_balance(&staking_pot), ExistentialDeposit::get());
+				// and holds no foreign assets yet
+				assert_eq!(
+					ForeignAssets::balance(
+						foreign_asset_id_location.into(),
+						&staking_pot
+					),
+					0
+				);
+			},
+			|| {
+				// `SwapFirstAssetTrader` - staking
pot receives xcm fees in ROCs + assert!( + Balances::free_balance(&staking_pot) > ExistentialDeposit::get() + ); + // staking pot receives no foreign assets + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &staking_pot + ), + 0 + ); + } + ) + } + + #[test] + fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_fees_paid_by_sufficient_asset_works( + ) { + const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; + let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); + let staking_pot = StakingPot::get(); + + let foreign_asset_id_location = xcm::v3::Location::new( + 2, + [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Westend)], + ); + let foreign_asset_id_minimum_balance = 1_000_000_000; + // sovereign account as foreign asset owner (can be whoever for this scenario) + let foreign_asset_owner = + LocationToAccountId::convert_location(&Location::parent()).unwrap(); + let foreign_asset_create_params = + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, - LocationToAccountId, ForeignAssetsInstance, >( collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), ExistentialDeposit::get(), AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), + block_author_account.clone(), // receiving WNDs - (xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Westend)]), 1000000000000, 1_000_000_000), + foreign_asset_create_params, + 1000000000000, bridging_to_asset_hub_westend, ( [PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX)].into(), GlobalConsensus(Westend), [Parachain(1000)].into() - ) + ), + || { + // check block author before + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &block_author_account + ), + 0 + ); + }, + || { + // `TakeFirstAssetTrader` puts fees to the block author + assert!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &block_author_account + ) > 0 + ); + // `SwapFirstAssetTrader` did not work + assert_eq!(Balances::free_balance(&staking_pot), 0); + } ) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index bcd3a9aa896f89ed328f361ddc828d373d3db489..1ead28978550ca653c22df12f2588f406ce67f7f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -110,7 +110,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -1037,17 +1037,17 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { - ForeignAssets::current_storage_version().put::(); + ForeignAssets::in_code_storage_version().put::(); writes.saturating_inc(); } if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { - 
PoolAssets::current_storage_version().put::<PoolAssets>();
+			PoolAssets::in_code_storage_version().put::<PoolAssets>();
 			writes.saturating_inc();
 		}
 
@@ -1084,6 +1084,7 @@ mod benches {
 		[pallet_utility, Utility]
 		[pallet_timestamp, Timestamp]
 		[pallet_collator_selection, CollatorSelection]
+		[cumulus_pallet_parachain_system, ParachainSystem]
 		[cumulus_pallet_xcmp_queue, XcmpQueue]
 		[pallet_xcm_bridge_hub_router, ToRococo]
 		// XCM
@@ -1123,7 +1124,7 @@ impl_runtime_apis! {
 			Executive::execute_block(block)
 		}
 
-		fn initialize_block(header: &<Block as BlockT>::Header) {
+		fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 			Executive::initialize_block(header)
 		}
 	}
@@ -1426,8 +1427,31 @@ impl_runtime_apis! {
 			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
 			impl cumulus_pallet_session_benchmarking::Config for Runtime {}
 
+			parameter_types! {
+				pub ExistentialDepositAsset: Option<Asset> = Some((
+					WestendLocation::get(),
+					ExistentialDeposit::get()
+				).into());
+				pub const RandomParaId: ParaId = ParaId::new(43211234);
+			}
+
 			use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark;
 			impl pallet_xcm::benchmarking::Config for Runtime {
+				type DeliveryHelper = (
+					cumulus_primitives_utility::ToParentDeliveryHelper<
+						xcm_config::XcmConfig,
+						ExistentialDepositAsset,
+						xcm_config::PriceForParentDelivery,
+					>,
+					polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper<
+						xcm_config::XcmConfig,
+						ExistentialDepositAsset,
+						PriceForSiblingParachainDelivery,
+						RandomParaId,
+						ParachainSystem,
+					>
+				);
+
 				fn reachable_dest() -> Option<Location> {
 					Some(Parent.into())
 				}
@@ -1436,7 +1460,7 @@ impl_runtime_apis! {
 					// Relay/native token can be teleported between AH and Relay.
 					Some((
 						Asset {
-							fun: Fungible(EXISTENTIAL_DEPOSIT),
+							fun: Fungible(ExistentialDeposit::get()),
 							id: AssetId(Parent.into())
 						},
 						Parent.into(),
 					))
 				}
 
 				fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> {
-					// AH can reserve transfer native token to some random parachain.
-					let random_para_id = 43211234;
-					ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(
-						random_para_id.into()
-					);
 					Some((
 						Asset {
-							fun: Fungible(EXISTENTIAL_DEPOSIT),
+							fun: Fungible(ExistentialDeposit::get()),
 							id: AssetId(Parent.into())
 						},
-						ParentThen(Parachain(random_para_id).into()).into(),
+						// AH can reserve transfer native token to some random parachain.
+						ParentThen(Parachain(RandomParaId::get().into()).into()).into(),
 					))
 				}
 
@@ -1506,6 +1526,13 @@ impl_runtime_apis! {
 					});
 					Some((assets, fee_index as u32, dest, verify))
 				}
+
+				fn get_asset() -> Asset {
+					Asset {
+						id: AssetId(Location::parent()),
+						fun: Fungible(ExistentialDeposit::get()),
+					}
+				}
 			}
 
 			use pallet_xcm_bridge_hub_router::benchmarking::{
@@ -1545,13 +1572,6 @@ impl_runtime_apis! {
 			use xcm_config::{MaxAssetsIntoHolding, WestendLocation};
 			use pallet_xcm_benchmarks::asset_instance_from;
 
-			parameter_types! {
-				pub ExistentialDepositAsset: Option<Asset> = Some((
-					WestendLocation::get(),
-					ExistentialDeposit::get()
-				).into());
-			}
-
 			impl pallet_xcm_benchmarks::Config for Runtime {
 				type XcmConfig = xcm_config::XcmConfig;
 				type AccountIdConverter = xcm_config::LocationToAccountId;
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs
index 504731f4a9ef743e62090582901a63f7aee78829..71facff87d35f350114850653acfbebeb5c29529 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs
@@ -16,10 +16,10 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -64,8 +64,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `145`
 		//  Estimated: `3610`
-		// Minimum execution time: 25_482_000 picoseconds.
-		Weight::from_parts(26_622_000, 0)
+		// Minimum execution time: 21_630_000 picoseconds.
+		Weight::from_parts(22_306_000, 0)
 			.saturating_add(Weight::from_parts(0, 3610))
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -90,8 +90,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `145`
 		//  Estimated: `3610`
-		// Minimum execution time: 87_319_000 picoseconds.
-		Weight::from_parts(89_764_000, 0)
+		// Minimum execution time: 91_802_000 picoseconds.
+		Weight::from_parts(93_672_000, 0)
 			.saturating_add(Weight::from_parts(0, 3610))
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(3))
@@ -118,8 +118,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `367`
 		//  Estimated: `6196`
-		// Minimum execution time: 139_133_000 picoseconds.
-		Weight::from_parts(141_507_000, 0)
+		// Minimum execution time: 118_930_000 picoseconds.
+		Weight::from_parts(122_306_000, 0)
 			.saturating_add(Weight::from_parts(0, 6196))
 			.saturating_add(T::DbWeight::get().reads(9))
 			.saturating_add(T::DbWeight::get().writes(5))
@@ -148,8 +148,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `496`
 		//  Estimated: `6208`
-		// Minimum execution time: 144_241_000 picoseconds.
-		Weight::from_parts(149_709_000, 0)
+		// Minimum execution time: 140_527_000 picoseconds.
+		Weight::from_parts(144_501_000, 0)
 			.saturating_add(Weight::from_parts(0, 6208))
 			.saturating_add(T::DbWeight::get().reads(12))
 			.saturating_add(T::DbWeight::get().writes(7))
@@ -158,8 +158,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 10_392_000 picoseconds.
-		Weight::from_parts(10_779_000, 0)
+		// Minimum execution time: 7_556_000 picoseconds.
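Each of these entries carries two numbers: the comment records the minimum execution time observed across the benchmark's repeats, while `Weight::from_parts` carries the value the runtime actually charges, which the CLI derives from the same samples and which is typically a few percent above that minimum. A toy illustration of the distinction, with invented samples:

```rust
// Toy illustration only: the benchmark tooling reports the minimum observed
// time in the doc comment, while the charged weight is derived from the whole
// sample set and sits a bit above it. These sample values are made up.
fn main() {
    let samples_ps: [u64; 5] = [7_556_000, 7_612_000, 7_798_000, 7_701_000, 7_923_000];

    let min = samples_ps.iter().copied().min().unwrap();
    let mean = samples_ps.iter().copied().sum::<u64>() / samples_ps.len() as u64;

    // The comment line would carry `min`; `Weight::from_parts` a fitted value
    // in the neighbourhood of `mean`.
    println!("minimum: {min} ps, charged (approx.): {mean} ps");
    assert!(min <= mean);
}
```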
+ Weight::from_parts(7_798_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -168,8 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_088_000 picoseconds. - Weight::from_parts(7_257_000, 0) + // Minimum execution time: 6_373_000 picoseconds. + Weight::from_parts(6_603_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +179,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_095_000 picoseconds. - Weight::from_parts(2_136_000, 0) + // Minimum execution time: 1_941_000 picoseconds. + Weight::from_parts(2_088_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -206,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_728_000 picoseconds. - Weight::from_parts(29_349_000, 0) + // Minimum execution time: 27_080_000 picoseconds. + Weight::from_parts(27_820_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -232,8 +232,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_605_000 picoseconds. - Weight::from_parts(31_477_000, 0) + // Minimum execution time: 28_850_000 picoseconds. + Weight::from_parts(29_506_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -244,45 +244,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_137_000 picoseconds. - Weight::from_parts(2_303_000, 0) + // Minimum execution time: 2_033_000 picoseconds. + Weight::from_parts(2_201_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_719_000 picoseconds. - Weight::from_parts(17_329_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 18_844_000 picoseconds. + Weight::from_parts(19_197_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 16_687_000 picoseconds. 
- Weight::from_parts(17_405_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 18_940_000 picoseconds. + Weight::from_parts(19_450_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_751_000 picoseconds. - Weight::from_parts(19_130_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 20_521_000 picoseconds. + Weight::from_parts(21_076_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -302,36 +302,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_189_000 picoseconds. - Weight::from_parts(27_760_000, 0) + // Minimum execution time: 26_007_000 picoseconds. + Weight::from_parts(26_448_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_307_000 picoseconds. - Weight::from_parts(9_691_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_584_000 picoseconds. + Weight::from_parts(12_080_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 17_607_000 picoseconds. - Weight::from_parts(18_090_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 19_157_000 picoseconds. 
+ Weight::from_parts(19_513_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -347,12 +347,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_322_000 picoseconds. - Weight::from_parts(35_754_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 34_878_000 picoseconds. + Weight::from_parts(35_623_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -363,8 +363,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_513_000 picoseconds. - Weight::from_parts(4_754_000, 0) + // Minimum execution time: 3_900_000 picoseconds. + Weight::from_parts(4_161_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -375,10 +375,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 27_860_000 picoseconds. - Weight::from_parts(28_279_000, 0) + // Minimum execution time: 25_731_000 picoseconds. + Weight::from_parts(26_160_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 37_251_000 picoseconds. 
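`claim_assets` is the one genuinely new row in this trait; the rest of the file only refreshes numbers. A hedged, self-contained sketch of how such a `WeightInfo` method is consumed on the dispatch side (the trait and dispatch function below are simplified stand-ins for the real `pallet_xcm` wiring, which charges the weight declaratively through a `#[pallet::weight(...)]` attribute):

```rust
/// Simplified mirror of the `pallet_xcm::WeightInfo` contract: the pallet is
/// generic over a type that prices each extrinsic, and every runtime plugs in
/// its own autogenerated implementation like the one in this file.
trait WeightInfo {
    // The real method returns `frame_support::weights::Weight`; a plain u64
    // of ref-time picoseconds keeps this sketch self-contained.
    fn claim_assets() -> u64;
}

/// Stand-in carrying the asset-hub-westend figure from the entry above.
struct AssetHubWestendWeights;
impl WeightInfo for AssetHubWestendWeights {
    fn claim_assets() -> u64 {
        38_075_000
    }
}

/// Sketch of the dispatch side: the benchmarked weight is charged up front,
/// before the call body runs.
fn dispatch_claim_assets<W: WeightInfo>() -> u64 {
    let charged = W::claim_assets();
    // ... the actual asset-trap lookup and re-deposit would happen here ...
    charged
}

fn main() {
    println!("charged: {} ps", dispatch_claim_assets::<AssetHubWestendWeights>());
}
```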
+ Weight::from_parts(38_075_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 5f6e4c007cd9824f10d4b324513366a84ed1acf1..aa8c3cf2f14db33e119a355bff2e61ccc9a2c23d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -22,8 +22,8 @@ use asset_hub_westend_runtime::{ xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, TrustBackedAssetsPalletLocation, TrustBackedAssetsPalletLocationV3, - WestendLocation, WestendLocationV3, XcmConfig, + LocationToAccountId, StakingPot, TrustBackedAssetsPalletLocation, + TrustBackedAssetsPalletLocationV3, WestendLocation, WestendLocationV3, XcmConfig, }, AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, @@ -50,11 +50,11 @@ use frame_support::{ use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; use sp_consensus_aura::SlotDuration; use sp_runtime::traits::MaybeEquivalence; -use std::convert::Into; +use std::{convert::Into, ops::Mul}; use testnet_parachains_constants::westend::{consensus::*, currency::UNITS, fee::WeightToFee}; use xcm::latest::prelude::{Assets as XcmAssets, *}; use xcm_builder::V4V3LocationConverter; -use xcm_executor::traits::{JustTry, WeightTrader}; +use xcm_executor::traits::{ConvertLocation, JustTry, WeightTrader}; const ALICE: [u8; 32] = [1u8; 32]; const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; @@ -86,6 +86,52 @@ fn slot_durations() -> SlotDurations { } } +fn setup_pool_for_paying_fees_with_foreign_assets( + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( + AccountId, + xcm::v3::Location, + Balance, + ), +) { + let existential_deposit = ExistentialDeposit::get(); + + // setup a pool to pay fees with `foreign_asset_id_location` tokens + let pool_owner: AccountId = [14u8; 32].into(); + let native_asset = xcm::v3::Location::parent(); + let pool_liquidity: Balance = + existential_deposit.max(foreign_asset_id_minimum_balance).mul(100_000); + + let _ = Balances::force_set_balance( + RuntimeOrigin::root(), + pool_owner.clone().into(), + (existential_deposit + pool_liquidity).mul(2).into(), + ); + + assert_ok!(ForeignAssets::mint( + RuntimeOrigin::signed(foreign_asset_owner), + foreign_asset_id_location.into(), + pool_owner.clone().into(), + (foreign_asset_id_minimum_balance + pool_liquidity).mul(2).into(), + )); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(pool_owner.clone()), + Box::new(native_asset.into()), + Box::new(foreign_asset_id_location.into()) + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(pool_owner.clone()), + Box::new(native_asset.into()), + Box::new(foreign_asset_id_location.into()), + pool_liquidity, + pool_liquidity, + 1, + 1, + pool_owner, + )); +} + #[test] fn test_buy_and_refund_weight_in_native() { ExtBuilder::::default() @@ -1071,30 +1117,133 @@ fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_rococo_works() Some(xcm_config::TreasuryAccount::get()), ) } + #[test] -fn 
receive_reserve_asset_deposited_roc_from_asset_hub_rococo_works() {
+fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_pool_swap_works() {
 	const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32];
+	let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT);
+	let staking_pot = StakingPot::get();
+
+	let foreign_asset_id_location =
+		xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Rococo)]);
+	let foreign_asset_id_minimum_balance = 1_000_000_000;
+	// sovereign account as foreign asset owner (can be whoever for this scenario)
+	let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap();
+	let foreign_asset_create_params =
+		(foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance);
+
 	asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::<
 		Runtime,
 		AllPalletsWithoutSystem,
 		XcmConfig,
-		LocationToAccountId,
 		ForeignAssetsInstance,
 	>(
 		collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)),
 		ExistentialDeposit::get(),
 		AccountId::from([73; 32]),
-		AccountId::from(BLOCK_AUTHOR_ACCOUNT),
+		block_author_account.clone(),
 		// receiving ROCs
-		(xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Rococo)]), 1000000000000, 1_000_000_000),
-		bridging_to_asset_hub_rococo,
+		foreign_asset_create_params.clone(),
+		1000000000000,
+		|| {
+			// setup pool for paying fees to touch `SwapFirstAssetTrader`
+			setup_pool_for_paying_fees_with_foreign_assets(foreign_asset_create_params);
+			// staking pot account for collecting local native fees from `BuyExecution`
+			let _ = Balances::force_set_balance(RuntimeOrigin::root(), StakingPot::get().into(), ExistentialDeposit::get());
+			// prepare bridge configuration
+			bridging_to_asset_hub_rococo()
+		},
 		(
 			[PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX)].into(),
 			GlobalConsensus(Rococo),
 			[Parachain(1000)].into()
-		)
+		),
+		|| {
+			// the staking pot starts with exactly the existential deposit
+			assert_eq!(Balances::free_balance(&staking_pot), ExistentialDeposit::get());
+			// and holds no foreign assets yet
+			assert_eq!(
+				ForeignAssets::balance(
+					foreign_asset_id_location.into(),
+					&staking_pot
+				),
+				0
+			);
+		},
+		|| {
+			// `SwapFirstAssetTrader` - staking pot receives xcm fees in WNDs (the local native asset)
+			assert!(
+				Balances::free_balance(&staking_pot) > ExistentialDeposit::get()
+			);
+			// staking pot receives no foreign assets
+			assert_eq!(
+				ForeignAssets::balance(
+					foreign_asset_id_location.into(),
+					&staking_pot
+				),
+				0
+			);
+		}
 	)
 }
+
+#[test]
+fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_sufficient_asset_works() {
+	const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32];
+	let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT);
+	let staking_pot = StakingPot::get();
+
+	let foreign_asset_id_location =
+		xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Rococo)]);
+	let foreign_asset_id_minimum_balance = 1_000_000_000;
+	// sovereign account as foreign asset owner (can be whoever for this scenario)
+	let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap();
+	let foreign_asset_create_params =
+		(foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance);
+
+	asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::<
+		Runtime,
+		AllPalletsWithoutSystem,
+		XcmConfig,
+		ForeignAssetsInstance,
>( + collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), + ExistentialDeposit::get(), + AccountId::from([73; 32]), + block_author_account.clone(), + // receiving ROCs + foreign_asset_create_params, + 1000000000000, + bridging_to_asset_hub_rococo, + ( + [PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX)].into(), + GlobalConsensus(Rococo), + [Parachain(1000)].into() + ), + || { + // check block author before + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &block_author_account + ), + 0 + ); + }, + || { + // `TakeFirstAssetTrader` puts fees to the block author + assert!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &block_author_account + ) > 0 + ); + // `SwapFirstAssetTrader` did not work + assert_eq!(Balances::free_balance(&staking_pot), 0); + } + ) +} + #[test] fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() { asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index a0c912b6f0fe3643f1f516b311fdfd5d401db4d0..fa2752179eb6fd238eb8596d8e3ebddf947680d3 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -371,7 +371,7 @@ mod tests { for (asset, expected_result) in test_data { assert_eq!( - >::matches_fungibles( + >::matches_fungibles( &asset.clone().try_into().unwrap() ), expected_result, diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index f1b3953ce473bf5dc27d63eb9a9404fb9086979d..883c93c97b4de6774e86ee83b84d246dc1427f7f 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -16,8 +16,8 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = frame-support = { path = "../../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../../substrate/frame/system", default-features = false } pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } @@ -56,11 +56,11 @@ std = [ "cumulus-primitives-core/std", "frame-support/std", "frame-system/std", - "pallet-asset-conversion/std", "pallet-assets/std", "pallet-balances/std", "pallet-collator-selection/std", "pallet-session/std", + "pallet-timestamp/std", "pallet-xcm-bridge-hub-router/std", "pallet-xcm/std", "parachain-info/std", diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 4007d926983ed97a8cae705428c1d61184173cb7..53e10956bd0d1d233555a8e4df1ae23fe351c678 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ 
b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -70,7 +70,8 @@ pub fn teleports_for_native_asset_works< + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config, + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, @@ -350,7 +351,8 @@ pub fn teleports_for_foreign_assets_works< + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + cumulus_pallet_xcmp_queue::Config - + pallet_assets::Config, + + pallet_assets::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, @@ -701,7 +703,8 @@ pub fn asset_transactor_transfer_with_local_consensus_currency_works: Into<[u8; 32]>, ValidatorIdOf: From>, BalanceOf: From, @@ -826,7 +829,8 @@ pub fn asset_transactor_transfer_with_pallet_assets_instance_works< + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + pallet_assets::Config, + + pallet_assets::Config + + pallet_timestamp::Config, AccountIdOf: Into<[u8; 32]>, ValidatorIdOf: From>, BalanceOf: From, @@ -1093,7 +1097,8 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + pallet_assets::Config, + + pallet_assets::Config + + pallet_timestamp::Config, AccountIdOf: Into<[u8; 32]>, ValidatorIdOf: From>, BalanceOf: From, @@ -1422,7 +1427,8 @@ pub fn reserve_transfer_native_asset_to_non_teleport_para_works< + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config, + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 4a0f31c449ea61d00b04110008d9984e5e87cbd7..1cce3b647cf0446a2246417b5383594fb501e600 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -30,7 +30,6 @@ use parachains_runtimes_test_utils::{ SlotDurations, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{traits::StaticLookup, Saturating}; -use sp_std::ops::Mul; use xcm::{latest::prelude::*, VersionedAssets}; use xcm_builder::{CreateMatcher, MatchXcm}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; @@ -71,7 +70,8 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config, + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, @@ -323,20 +323,22 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< Runtime, AllPalletsWithoutSystem, XcmConfig, - LocationToAccountId, ForeignAssetsPalletInstance, >( collator_session_keys: CollatorSessionKeys, existential_deposit: BalanceOf, target_account: AccountIdOf, block_author_account: AccountIdOf, - ( - foreign_asset_id_location, - transfered_foreign_asset_id_amount, - 
foreign_asset_id_minimum_balance, - ): (xcm::v3::Location, u128, u128), - prepare_configuration: fn() -> TestBridgingConfig, + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( + AccountIdOf, + xcm::v3::Location, + u128, + ), + foreign_asset_id_amount_to_transfer: u128, + prepare_configuration: impl FnOnce() -> TestBridgingConfig, (bridge_instance, universal_origin, descend_origin): (Junctions, Junction, Junctions), /* bridge adds origin manipulation on the way */ + additional_checks_before: impl FnOnce(), + additional_checks_after: impl FnOnce(), ) where Runtime: frame_system::Config + pallet_balances::Config @@ -347,14 +349,13 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< + cumulus_pallet_parachain_system::Config + cumulus_pallet_xcmp_queue::Config + pallet_assets::Config - + pallet_asset_conversion::Config, + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]> + From<[u8; 32]>, ValidatorIdOf: From>, BalanceOf: From + Into, XcmConfig: xcm_executor::Config, - LocationToAccountId: ConvertLocation>, >::AssetId: From + Into, >::AssetIdParameter: @@ -365,9 +366,6 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< + Into, <::Lookup as StaticLookup>::Source: From<::AccountId>, - ::AssetKind: - From + Into, - ::Balance: From, ForeignAssetsPalletInstance: 'static, { ExtBuilder::::default() @@ -382,88 +380,31 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< block_author_account.clone().into(), ); - // prepare bridge config - let TestBridgingConfig { local_bridge_hub_location, .. } = prepare_configuration(); - // drip 'ED' user target account let _ = >::deposit_creating( &target_account, existential_deposit, ); - // sovereign account as foreign asset owner (can be whoever for this scenario, doesnt - // matter) - let sovereign_account_as_owner_of_foreign_asset = - LocationToAccountId::convert_location(&Location::parent()).unwrap(); - - // staking pot account for collecting local native fees from `BuyExecution` - let staking_pot = >::account_id(); - let _ = >::deposit_creating( - &staking_pot, - existential_deposit, - ); - // create foreign asset for wrapped/derivated representation assert_ok!( >::force_create( RuntimeHelper::::root_origin(), foreign_asset_id_location.into(), - sovereign_account_as_owner_of_foreign_asset.clone().into(), + foreign_asset_owner.into(), true, // is_sufficient=true foreign_asset_id_minimum_balance.into() ) ); - // setup a pool to pay fees with `foreign_asset_id_location` tokens - let pool_owner: AccountIdOf = [1u8; 32].into(); - let native_asset = xcm::v3::Location::parent(); - let pool_liquidity: u128 = - existential_deposit.into().max(foreign_asset_id_minimum_balance).mul(100_000); - - let _ = >::deposit_creating( - &pool_owner, - (existential_deposit.into() + pool_liquidity).mul(2).into(), - ); - - assert_ok!(>::mint( - RuntimeHelper::::origin_of( - sovereign_account_as_owner_of_foreign_asset - ), - foreign_asset_id_location.into(), - pool_owner.clone().into(), - (foreign_asset_id_minimum_balance + pool_liquidity).mul(2).into(), - )); - - assert_ok!(>::create_pool( - RuntimeHelper::::origin_of(pool_owner.clone()), - Box::new(native_asset.into()), - Box::new(foreign_asset_id_location.into()) - )); - - assert_ok!(>::add_liquidity( - RuntimeHelper::::origin_of(pool_owner.clone()), - Box::new(native_asset.into()), - Box::new(foreign_asset_id_location.into()), - pool_liquidity.into(), - 
pool_liquidity.into(), - 1.into(), - 1.into(), - pool_owner, - )); + // prepare bridge config + let TestBridgingConfig { local_bridge_hub_location, .. } = prepare_configuration(); // Balances before assert_eq!( >::free_balance(&target_account), existential_deposit.clone() ); - assert_eq!( - >::free_balance(&block_author_account), - 0.into() - ); - assert_eq!( - >::free_balance(&staking_pot), - existential_deposit.clone() - ); // ForeignAssets balances before assert_eq!( @@ -473,27 +414,16 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< ), 0.into() ); - assert_eq!( - >::balance( - foreign_asset_id_location.into(), - &block_author_account - ), - 0.into() - ); - assert_eq!( - >::balance( - foreign_asset_id_location.into(), - &staking_pot - ), - 0.into() - ); + + // additional check before + additional_checks_before(); let foreign_asset_id_location_latest: Location = foreign_asset_id_location.try_into().unwrap(); let expected_assets = Assets::from(vec![Asset { id: AssetId(foreign_asset_id_location_latest.clone()), - fun: Fungible(transfered_foreign_asset_id_amount), + fun: Fungible(foreign_asset_id_amount_to_transfer), }]); let expected_beneficiary = Location::new( 0, @@ -510,7 +440,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< BuyExecution { fees: Asset { id: AssetId(foreign_asset_id_location_latest.clone()), - fun: Fungible(transfered_foreign_asset_id_amount), + fun: Fungible(foreign_asset_id_amount_to_transfer), }, weight_limit: Unlimited, }, @@ -544,19 +474,10 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< assert_ok!(outcome.ensure_complete()); // Balances after - // staking pot receives xcm fees in dot - assert!( - >::free_balance(&staking_pot) != - existential_deposit - ); assert_eq!( >::free_balance(&target_account), existential_deposit.clone() ); - assert_eq!( - >::free_balance(&block_author_account), - 0.into() - ); // ForeignAssets balances after assert!( @@ -565,20 +486,9 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< &target_account ) > 0.into() ); - assert_eq!( - >::balance( - foreign_asset_id_location.into(), - &staking_pot - ), - 0.into() - ); - assert_eq!( - >::balance( - foreign_asset_id_location.into(), - &block_author_account - ), - 0.into() - ); + + // additional check after + additional_checks_after(); }) } @@ -602,7 +512,8 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + cumulus_pallet_xcmp_queue::Config - + pallet_xcm_bridge_hub_router::Config, + + pallet_xcm_bridge_hub_router::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 02e51782700b8e3fc8dd4172935646a9ca7c8fbe..7a1951fd24bd2d74ae722eef31b81dfdb28c8d9b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -21,7 +21,7 @@ log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = [ "derive", ] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", 
default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index d1eac089f670ac9d72b33991bfd87dd9c02c4f7a..0b48d1717fa96c4854371e44ae792d0ce41dc2e2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -102,8 +102,6 @@ use polkadot_runtime_common::prod_or_fast; #[cfg(feature = "runtime-benchmarks")] use benchmark_helpers::DoNothingRouter; -#[cfg(not(feature = "runtime-benchmarks"))] -use bridge_hub_common::BridgeHubMessageRouter; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -170,12 +168,12 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Balances::on_chain_storage_version() == StorageVersion::new(0) { - Balances::current_storage_version().put::(); + Balances::in_code_storage_version().put::(); writes.saturating_inc(); } @@ -204,7 +202,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, @@ -367,11 +365,15 @@ parameter_types! { impl pallet_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] + // Use the NoopMessageProcessor exclusively for benchmarks, not for tests with the + // runtime-benchmarks feature as tests require the BridgeHubMessageRouter to process messages. + // The "test" feature flag doesn't work, hence the reliance on the "std" feature, which is + // enabled during tests. + #[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))] type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = BridgeHubMessageRouter< + #[cfg(not(all(not(feature = "std"), feature = "runtime-benchmarks")))] + type MessageProcessor = bridge_hub_common::BridgeHubMessageRouter< xcm_builder::ProcessXcmMessage< AggregateMessageOrigin, xcm_executor::XcmExecutor, @@ -730,7 +732,7 @@ construct_runtime!( // Message Queue. Importantly, is registered last so that messages are processed after // the `on_initialize` hooks of bridging pallets. - MessageQueue: pallet_message_queue = 250, + MessageQueue: pallet_message_queue = 175, } ); @@ -814,7 +816,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -1109,6 +1111,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -1117,7 +1125,7 @@ impl_runtime_apis! 
{ // Relay/native token can be teleported between BH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -1140,6 +1148,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index 5faded42aa82df52f403b68de2a470ad4a5a17b7..a732e1a573439c4b658191697024ac3c396c9de5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_683_000 picoseconds. - Weight::from_parts(24_199_000, 0) + // Minimum execution time: 18_513_000 picoseconds. + Weight::from_parts(19_156_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 89_524_000 picoseconds. - Weight::from_parts(91_401_000, 0) + // Minimum execution time: 88_096_000 picoseconds. + Weight::from_parts(89_732_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 91_890_000 picoseconds. - Weight::from_parts(93_460_000, 0) + // Minimum execution time: 88_239_000 picoseconds. + Weight::from_parts(89_729_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_152_000 picoseconds. - Weight::from_parts(7_355_000, 0) + // Minimum execution time: 5_955_000 picoseconds. + Weight::from_parts(6_266_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_081_000 picoseconds. 
- Weight::from_parts(2_258_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(1_961_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 28_067_000 picoseconds. - Weight::from_parts(28_693_000, 0) + // Minimum execution time: 24_388_000 picoseconds. + Weight::from_parts(25_072_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 30_420_000 picoseconds. - Weight::from_parts(31_373_000, 0) + // Minimum execution time: 26_762_000 picoseconds. + Weight::from_parts(27_631_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_087_000 picoseconds. - Weight::from_parts(2_243_000, 0) + // Minimum execution time: 1_856_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 15_142_000 picoseconds. - Weight::from_parts(15_598_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_718_000 picoseconds. + Weight::from_parts(18_208_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 15_041_000 picoseconds. - Weight::from_parts(15_493_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_597_000 picoseconds. + Weight::from_parts(18_090_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_624_000 picoseconds. 
- Weight::from_parts(17_031_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 19_533_000 picoseconds. + Weight::from_parts(20_164_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 26_398_000 picoseconds. - Weight::from_parts(26_847_000, 0) + // Minimum execution time: 24_958_000 picoseconds. + Weight::from_parts(25_628_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_741_000 picoseconds. - Weight::from_parts(8_954_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 12_209_000 picoseconds. + Weight::from_parts(12_612_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_306_000 picoseconds. - Weight::from_parts(15_760_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_844_000 picoseconds. + Weight::from_parts(18_266_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_127_000 picoseconds. - Weight::from_parts(33_938_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 34_131_000 picoseconds. 
+ Weight::from_parts(34_766_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_290_000 picoseconds. - Weight::from_parts(4_450_000, 0) + // Minimum execution time: 3_525_000 picoseconds. + Weight::from_parts(3_724_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_408_000 picoseconds. - Weight::from_parts(26_900_000, 0) + // Minimum execution time: 24_975_000 picoseconds. + Weight::from_parts(25_517_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 33_761_000 picoseconds. + Weight::from_parts(34_674_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index b2e5cc9ef06fe827749416ce357ce28c8453f9b9..239bd946e759b1c8aa1e892912b5bce8093bde00 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -20,9 +20,9 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, - xcm_config::XcmConfig, BridgeRejectObsoleteHeadersAndMessages, Executive, - MessageQueueServiceWeight, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, - UncheckedExtrinsic, + xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, + Executive, MessageQueueServiceWeight, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, + SignedExtra, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -51,6 +51,7 @@ fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys( + 11155111, collator_session_keys(), 1013, 1000, @@ -69,6 +70,7 @@ pub fn transfer_token_to_ethereum_works() { #[test] pub fn unpaid_transfer_token_to_ethereum_fails_with_barrier() { snowbridge_runtime_test_common::send_unpaid_transfer_token_message::( + 11155111, collator_session_keys(), 1013, 1000, @@ -80,6 +82,7 @@ pub fn unpaid_transfer_token_to_ethereum_fails_with_barrier() { #[test] pub fn transfer_token_to_ethereum_fee_not_enough() { snowbridge_runtime_test_common::send_transfer_token_message_failure::( + 11155111, collator_session_keys(), 1013, 1000, @@ -95,6 +98,7 @@ pub fn transfer_token_to_ethereum_fee_not_enough() { #[test] pub fn 
transfer_token_to_ethereum_insufficient_fund() {
	snowbridge_runtime_test_common::send_transfer_token_message_failure::<Runtime, XcmConfig>(
+		11155111,
		collator_session_keys(),
		1013,
		1000,
@@ -135,6 +139,33 @@ fn ethereum_to_polkadot_message_extrinsics_work() {
	);
}

+/// Tests that the digest items are as expected when an Ethereum Outbound message is received.
+/// If the MessageQueue pallet is configured before the EthereumOutboundQueue (i.e. listed before
+/// it in the construct_runtime macro), this test will fail.
+#[test]
+pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() {
+	snowbridge_runtime_test_common::ethereum_outbound_queue_processes_messages_before_message_queue_works::<
+		Runtime,
+		XcmConfig,
+		AllPalletsWithoutSystem,
+	>(
+		11155111,
+		collator_session_keys(),
+		1013,
+		1000,
+		H160::random(),
+		H160::random(),
+		DefaultBridgeHubEthereumBaseFee::get(),
+		Box::new(|runtime_event_encoded: Vec<u8>| {
+			match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) {
+				Ok(RuntimeEvent::EthereumOutboundQueue(event)) => Some(event),
+				_ => None,
+			}
+		}),
+	)
+}
+
fn construct_extrinsic(
	sender: sp_keyring::AccountKeyring,
	call: RuntimeCall,
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
index 6015251a238acb30495e007e78954e9d6cea31ec..8623f7cb366ecf3930b25563d6a912e53240d999 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
@@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features =
hex-literal = { version = "0.4.1" }
log = { workspace = true }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }

# Substrate
frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index 8ffea24b3cc7c13d2b8236b9783510bc15cb4849..e1344fce63dc9bad063c5b7f0d620a9a10bbf666 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -142,12 +142,12 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions {
	let mut writes = 0;

	if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) {
-		PolkadotXcm::current_storage_version().put::<PolkadotXcm>();
+		PolkadotXcm::in_code_storage_version().put::<PolkadotXcm>();
		writes.saturating_inc();
	}

	if Balances::on_chain_storage_version() == StorageVersion::new(0) {
-		Balances::current_storage_version().put::<Balances>();
+		Balances::in_code_storage_version().put::<Balances>();
		writes.saturating_inc();
	}
@@ -176,7 +176,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
	spec_name: create_runtime_str!("bridge-hub-westend"),
	impl_name: create_runtime_str!("bridge-hub-westend"),
	authoring_version: 1,
-	spec_version: 1_007_000,
+	spec_version: 1_008_000,
	impl_version: 0,
	apis: RUNTIME_API_VERSIONS,
	transaction_version: 4,
@@ -522,6 +522,7 @@ mod benches {
		[pallet_utility, Utility]
		[pallet_timestamp, Timestamp]
		[pallet_collator_selection, CollatorSelection]
+
[cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] @@ -565,7 +566,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -805,6 +806,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -813,7 +820,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between BH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -836,6 +843,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index 83e4260e77198355d23ea0c38481d7b8e68267c7..a78ff2355efaf06562e44828a8df0730481d4098 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_219_000 picoseconds. - Weight::from_parts(23_818_000, 0) + // Minimum execution time: 19_527_000 picoseconds. + Weight::from_parts(19_839_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -88,10 +88,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `70` + // Measured: `107` // Estimated: `3593` - // Minimum execution time: 90_120_000 picoseconds. - Weight::from_parts(92_545_000, 0) + // Minimum execution time: 90_938_000 picoseconds. 
+ Weight::from_parts(92_822_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -124,10 +124,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `70` + // Measured: `107` // Estimated: `3593` - // Minimum execution time: 91_339_000 picoseconds. - Weight::from_parts(93_204_000, 0) + // Minimum execution time: 90_133_000 picoseconds. + Weight::from_parts(92_308_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_976_000 picoseconds. - Weight::from_parts(7_284_000, 0) + // Minimum execution time: 6_205_000 picoseconds. + Weight::from_parts(6_595_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_044_000 picoseconds. - Weight::from_parts(2_223_000, 0) + // Minimum execution time: 1_927_000 picoseconds. + Weight::from_parts(2_062_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 27_778_000 picoseconds. - Weight::from_parts(28_318_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_782_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 30_446_000 picoseconds. - Weight::from_parts(31_925_000, 0) + // Minimum execution time: 28_188_000 picoseconds. + Weight::from_parts(28_826_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_037_000 picoseconds. - Weight::from_parts(2_211_000, 0) + // Minimum execution time: 1_886_000 picoseconds. + Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 15_620_000 picoseconds. - Weight::from_parts(15_984_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_443_000 picoseconds. 
+ Weight::from_parts(17_964_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 15_689_000 picoseconds. - Weight::from_parts(16_093_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_357_000 picoseconds. + Weight::from_parts(18_006_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_946_000 picoseconds. - Weight::from_parts(17_192_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_838_000 picoseconds. + Weight::from_parts(19_688_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 27_164_000 picoseconds. - Weight::from_parts(27_760_000, 0) + // Minimum execution time: 25_517_000 picoseconds. + Weight::from_parts(26_131_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_689_000 picoseconds. - Weight::from_parts(8_874_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_587_000 picoseconds. + Weight::from_parts(11_963_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_944_000 picoseconds. 
- Weight::from_parts(16_381_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_490_000 picoseconds. + Weight::from_parts(18_160_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_826_000 picoseconds. - Weight::from_parts(34_784_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 34_088_000 picoseconds. + Weight::from_parts(34_598_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_257_000 picoseconds. - Weight::from_parts(4_383_000, 0) + // Minimum execution time: 3_566_000 picoseconds. + Weight::from_parts(3_754_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_924_000 picoseconds. - Weight::from_parts(27_455_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_477_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_661_000 picoseconds. 
+ Weight::from_parts(35_411_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index d34b5cd0eed1ef3f9c8192f0eb0bd3fdfdfa2abf..5f2a6e050d83c3db662f8ff4896d32dc8a28fde3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -25,6 +25,7 @@ sp-std = { path = "../../../../../substrate/primitives/std", default-features = sp-tracing = { path = "../../../../../substrate/primitives/tracing" } pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } # Cumulus asset-test-utils = { path = "../../assets/test-utils" } @@ -73,6 +74,7 @@ std = [ "pallet-bridge-messages/std", "pallet-bridge-parachains/std", "pallet-bridge-relayers/std", + "pallet-timestamp/std", "pallet-utility/std", "parachains-common/std", "parachains-runtimes-test-utils/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index 4f634c184aa84faa6466102ef5fee30f254bad43..2b48f2e3d515f625532d9c5f50fabadb9a89517a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -197,7 +197,9 @@ where pub(crate) fn initialize_bridge_grandpa_pallet( init_data: bp_header_chain::InitializationData>, ) where - Runtime: BridgeGrandpaConfig, + Runtime: BridgeGrandpaConfig + + cumulus_pallet_parachain_system::Config + + pallet_timestamp::Config, { pallet_bridge_grandpa::Pallet::::initialize( RuntimeHelper::::root_origin(), diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs index caf0cddec664a55ce080ed6052a9cdf42a5e2f28..e5b176fc77873805fb0e4ed6dba74d720ea3479a 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs @@ -81,7 +81,7 @@ where } fn proposal_of(proposal_hash: HashOf) -> Option> { - pallet_collective::Pallet::::proposal_of(proposal_hash) + pallet_collective::ProposalOf::::get(proposal_hash) } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 487ee2660de90a8b5eb1611daf181f3c1f37293e..9db729ef9bf4c6e2c378b34b8ffa21abbd9a3077 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -809,7 +809,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -992,8 +992,21 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + parameter_types! { + pub ExistentialDepositAsset: Option = Some(( + xcm_config::WndLocation::get(), + ExistentialDeposit::get() + ).into()); + } + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -1002,7 +1015,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between Collectives and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }.into(), Parent.into(), @@ -1025,6 +1038,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } let whitelist: Vec = vec![ diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index 50dfbffde01f21d1138f0fdaa27649f962e245e4..5d427d850046ff030c6c5b6247426849227e7ea1 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 24_540_000 picoseconds. - Weight::from_parts(25_439_000, 0) + // Minimum execution time: 21_813_000 picoseconds. + Weight::from_parts(22_332_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 86_614_000 picoseconds. - Weight::from_parts(88_884_000, 0) + // Minimum execution time: 93_243_000 picoseconds. 
+ Weight::from_parts(95_650_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 87_915_000 picoseconds. - Weight::from_parts(90_219_000, 0) + // Minimum execution time: 96_199_000 picoseconds. + Weight::from_parts(98_620_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_872_000 picoseconds. - Weight::from_parts(7_110_000, 0) + // Minimum execution time: 6_442_000 picoseconds. + Weight::from_parts(6_682_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_009_000 picoseconds. - Weight::from_parts(2_163_000, 0) + // Minimum execution time: 1_833_000 picoseconds. + Weight::from_parts(1_973_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_858_000 picoseconds. - Weight::from_parts(29_355_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_224_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_598_000 picoseconds. - Weight::from_parts(31_168_000, 0) + // Minimum execution time: 29_070_000 picoseconds. + Weight::from_parts(30_205_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_090_000 picoseconds. - Weight::from_parts(2_253_000, 0) + // Minimum execution time: 1_904_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_133_000 picoseconds. - Weight::from_parts(16_433_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 18_348_000 picoseconds. 
+ Weight::from_parts(18_853_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 16_012_000 picoseconds. - Weight::from_parts(16_449_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 17_964_000 picoseconds. + Weight::from_parts(18_548_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 17_922_000 picoseconds. - Weight::from_parts(18_426_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_157_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_280_000 picoseconds. - Weight::from_parts(28_026_000, 0) + // Minimum execution time: 26_632_000 picoseconds. + Weight::from_parts(27_314_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_387_000 picoseconds. - Weight::from_parts(9_644_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_929_000 picoseconds. + Weight::from_parts(12_304_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 16_649_000 picoseconds. 
- Weight::from_parts(17_025_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 18_599_000 picoseconds. + Weight::from_parts(19_195_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_355_000 picoseconds. - Weight::from_parts(35_295_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 35_524_000 picoseconds. + Weight::from_parts(36_272_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_527_000 picoseconds. - Weight::from_parts(4_699_000, 0) + // Minimum execution time: 4_044_000 picoseconds. + Weight::from_parts(4_238_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 27_011_000 picoseconds. - Weight::from_parts(27_398_000, 0) + // Minimum execution time: 25_741_000 picoseconds. + Weight::from_parts(26_301_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 35_925_000 picoseconds. + Weight::from_parts(36_978_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index cc25cbda0a4276c0c8956cf94b590998f4d3fac9..87f637d5b59a4c56880fed680eace7f5d108a46e 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -59,7 +59,7 @@ parameter_types! 
{ pub const GovernanceLocation: Location = Location::parent(); pub const FellowshipAdminBodyId: BodyId = BodyId::Index(xcm_constants::body::FELLOWSHIP_ADMIN_INDEX); pub AssetHub: Location = (Parent, Parachain(1000)).into(); - pub const TreasurerBodyId: BodyId = BodyId::Index(xcm_constants::body::TREASURER_INDEX); + pub const TreasurerBodyId: BodyId = BodyId::Treasury; pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into(); pub UsdtAssetHub: LocatableAssetId = LocatableAssetId { location: AssetHub::get(), diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 7b89f2df807734086031797915bde30b2eff77e2..171ac6a9528f134d9c22548500805ef36e9504f9 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -21,6 +21,7 @@ use frame_support::{ parameter_types, traits::{ConstBool, ConstU32, Nothing}, }; +use frame_system::EnsureSigned; use pallet_contracts::{ weights::SubstrateWeight, Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, }; @@ -65,6 +66,8 @@ impl Config for Runtime { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; @@ -72,5 +75,6 @@ impl Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); + type ApiVersion = (); type Xcm = pallet_xcm::Pallet; } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 15a5d9b04a91e9e5bdd44cdcfbe4fc59ba826847..0668b9a4c7d642120ed9d98204d9942d6cd2bcc9 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -50,7 +50,7 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8}, + traits::{ConstBool, ConstU16, ConstU32, ConstU64, ConstU8}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -205,6 +205,10 @@ impl pallet_authorship::Config for Runtime { type EventHandler = (CollatorSelection,); } +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + impl pallet_balances::Config for Runtime { type MaxLocks = ConstU32<50>; /// The type for recording an account's balance. @@ -212,7 +216,7 @@ impl pallet_balances::Config for Runtime { /// The ubiquitous event type. 
type RuntimeEvent = RuntimeEvent; type DustRemoval = (); - type ExistentialDeposit = ConstU128; + type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; type MaxReserves = ConstU32<50>; @@ -427,6 +431,7 @@ mod benches { [pallet_sudo, Sudo] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [pallet_contracts, Contracts] [pallet_xcm, PalletXcmExtrinsicsBenchmark::] ); @@ -461,7 +466,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -712,9 +717,22 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + parameter_types! { + pub ExistentialDepositAsset: Option = Some(( + xcm_config::RelayLocation::get(), + ExistentialDeposit::get() + ).into()); + } + use xcm::latest::prelude::*; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -723,7 +741,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between Contracts-System-Para and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -746,6 +764,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(EXISTENTIAL_DEPOSIT), + } + } } let whitelist: Vec = vec![ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 4c141fc691c8fef2f0d8d1befa58160eac2632c9..0bc3b510ed50e9ee590fd82961a7d08fb3f581b0 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hex-literal = "0.4.1" log = { workspace = true } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index f07bac8b2ef075c31ad09dfd4e59fb1025f04c5a..cdff371c5056b502178d8284ba270b6c839bef33 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -521,7 
+521,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -715,6 +715,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -723,7 +729,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between AH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -734,6 +740,13 @@ impl_runtime_apis! { // Reserve transfers are disabled None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } parameter_types! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index 0e34cba4aaf5d9879dfecc97ffd736b985c98f23..c5d315467c1ed8b2aabf7ac18abe10931a02951b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -16,26 +16,24 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=coretime-rococo-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ @@ -64,8 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 22_669_000 picoseconds. - Weight::from_parts(23_227_000, 0) + // Minimum execution time: 35_051_000 picoseconds. + Weight::from_parts(35_200_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 64_486_000 picoseconds. 
- Weight::from_parts(65_247_000, 0) + // Minimum execution time: 56_235_000 picoseconds. + Weight::from_parts(58_178_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -128,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_020_000 picoseconds. - Weight::from_parts(7_300_000, 0) + // Minimum execution time: 6_226_000 picoseconds. + Weight::from_parts(6_403_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -139,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_022_000 picoseconds. - Weight::from_parts(2_141_000, 0) + // Minimum execution time: 2_020_000 picoseconds. + Weight::from_parts(2_100_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -164,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 26_893_000 picoseconds. - Weight::from_parts(27_497_000, 0) + // Minimum execution time: 24_387_000 picoseconds. + Weight::from_parts(24_814_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -188,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 29_673_000 picoseconds. - Weight::from_parts(30_693_000, 0) + // Minimum execution time: 27_039_000 picoseconds. + Weight::from_parts(27_693_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -200,45 +198,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_990_000 picoseconds. - Weight::from_parts(2_105_000, 0) + // Minimum execution time: 1_920_000 picoseconds. + Weight::from_parts(2_082_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_819_000 picoseconds. - Weight::from_parts(15_180_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_141_000 picoseconds. + Weight::from_parts(17_500_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_935_000 picoseconds. 
- Weight::from_parts(15_335_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_074_000 picoseconds. + Weight::from_parts(17_431_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_278_000 picoseconds. - Weight::from_parts(16_553_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 19_139_000 picoseconds. + Weight::from_parts(19_474_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -256,36 +254,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 26_360_000 picoseconds. - Weight::from_parts(26_868_000, 0) + // Minimum execution time: 24_346_000 picoseconds. + Weight::from_parts(25_318_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_615_000 picoseconds. - Weight::from_parts(8_903_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_777_000 picoseconds. + Weight::from_parts(12_051_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_284_000 picoseconds. - Weight::from_parts(15_504_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_538_000 picoseconds. 
+ Weight::from_parts(17_832_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -299,12 +297,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `148` - // Estimated: `11038` - // Minimum execution time: 32_675_000 picoseconds. - Weight::from_parts(33_816_000, 0) - .saturating_add(Weight::from_parts(0, 11038)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `142` + // Estimated: `13507` + // Minimum execution time: 33_623_000 picoseconds. + Weight::from_parts(34_186_000, 0) + .saturating_add(Weight::from_parts(0, 13507)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -315,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_058_000 picoseconds. - Weight::from_parts(4_170_000, 0) + // Minimum execution time: 3_363_000 picoseconds. + Weight::from_parts(3_511_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -327,10 +325,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_375_000 picoseconds. - Weight::from_parts(26_026_000, 0) + // Minimum execution time: 23_969_000 picoseconds. + Weight::from_parts(24_347_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_071_000 picoseconds. + Weight::from_parts(35_031_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 37bb8809dabac0ce52192dc64d0aa54ee136dc25..1722e1fcb319ce1357acb20598778c8018ddf5ae 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -192,7 +192,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent and its pluralities (i.e. 
governance bodies) get free execution. diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 135ce95f30f99f9ba54ba690d46d8a4bb9878fea..a7d52dfd7849ec1a20ef400bd45f995374697409 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hex-literal = "0.4.1" log = { workspace = true } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 85d70d2f17b6416c5243d3195808786ee7a19878..7fa706479923b73c2667db5ae2b8bb272052145c 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -512,7 +512,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -706,6 +706,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -714,7 +720,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between AH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -725,6 +731,13 @@ impl_runtime_apis! { // Reserve transfers are disabled None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } parameter_types! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index d821a581b0dd7bae506f98a52bd28086b9d3c450..0082db3099d029c976779af8600bcaf4410e8a2f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,25 +17,23 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=coretime-westend-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ @@ -64,8 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 17_946_000 picoseconds. - Weight::from_parts(18_398_000, 0) + // Minimum execution time: 18_410_000 picoseconds. + Weight::from_parts(18_657_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 47_982_000 picoseconds. - Weight::from_parts(49_215_000, 0) + // Minimum execution time: 56_616_000 picoseconds. + Weight::from_parts(57_751_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -128,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_042_000 picoseconds. - Weight::from_parts(6_257_000, 0) + // Minimum execution time: 6_014_000 picoseconds. + Weight::from_parts(6_412_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -139,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_845_000 picoseconds. - Weight::from_parts(1_993_000, 0) + // Minimum execution time: 1_844_000 picoseconds. + Weight::from_parts(1_957_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -164,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_062_000 picoseconds. - Weight::from_parts(24_666_000, 0) + // Minimum execution time: 24_067_000 picoseconds. + Weight::from_parts(24_553_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -188,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 26_486_000 picoseconds. - Weight::from_parts(27_528_000, 0) + // Minimum execution time: 27_023_000 picoseconds. 
+ Weight::from_parts(27_620_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -200,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_881_000 picoseconds. - Weight::from_parts(2_008_000, 0) + // Minimum execution time: 1_866_000 picoseconds. + Weight::from_parts(1_984_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -211,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 15_971_000 picoseconds. - Weight::from_parts(16_455_000, 0) + // Minimum execution time: 16_425_000 picoseconds. + Weight::from_parts(16_680_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -223,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_603_000 picoseconds. - Weight::from_parts(17_037_000, 0) + // Minimum execution time: 16_171_000 picoseconds. + Weight::from_parts(16_564_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -235,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_821_000 picoseconds. - Weight::from_parts(18_200_000, 0) + // Minimum execution time: 17_785_000 picoseconds. + Weight::from_parts(18_123_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -256,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 23_878_000 picoseconds. - Weight::from_parts(24_721_000, 0) + // Minimum execution time: 23_903_000 picoseconds. + Weight::from_parts(24_769_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -268,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_566_000 picoseconds. - Weight::from_parts(11_053_000, 0) + // Minimum execution time: 10_617_000 picoseconds. + Weight::from_parts(10_843_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -279,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_020_000 picoseconds. - Weight::from_parts(16_619_000, 0) + // Minimum execution time: 16_656_000 picoseconds. + Weight::from_parts(17_106_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -301,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 32_136_000 picoseconds. - Weight::from_parts(32_610_000, 0) + // Minimum execution time: 31_721_000 picoseconds. 
+ Weight::from_parts(32_547_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -315,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_336_000 picoseconds. - Weight::from_parts(3_434_000, 0) + // Minimum execution time: 3_439_000 picoseconds. + Weight::from_parts(3_619_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -327,10 +325,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_977_000 picoseconds. - Weight::from_parts(24_413_000, 0) + // Minimum execution time: 24_657_000 picoseconds. + Weight::from_parts(24_971_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_028_000 picoseconds. + Weight::from_parts(34_697_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index 44049adf02711bd3b828f98bc3b90ab330e19317..b03c7748fe09181bc9db6da76ce2d8b2bf8cd353 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -198,7 +198,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent, its pluralities (i.e. governance bodies), and the Fellows plurality diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 10408aaf39a7611f93178802e17403ed4fd90838..232a82115a87a1a64c81319d4fd0677d0409a42e 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -332,7 +332,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index 8ff031b3e335b254ef11aaaf09f60055794111b4..c0b8fb7636b5898861c0b12912c58fbf0d43f924 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -15,7 +15,7 @@ enumflags2 = { version = "0.7.7" } hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 9e7dd6c884bd97b243e87b5937adb1137d57aa46..2b8cc32f67c653eff4fa56500770654f650b2043 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -448,6 +448,7 @@ mod benches { [frame_system, SystemBench::] [pallet_balances, Balances] [pallet_identity, Identity] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] @@ -455,6 +456,7 @@ mod benches { // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM @@ -493,7 +495,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -684,6 +686,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -692,7 +700,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between People and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -702,6 +710,13 @@ impl_runtime_apis! 
{ fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index 0f793524de9f5ef7da835090f9d81008ba756d59..fabce29b5fd9451f27364c38481d2dac98c430d8 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=people-kusama-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-kusama/src/weights/pallet_xcm.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=people-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,57 +48,28 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_931_000 picoseconds. - Weight::from_parts(26_340_000, 0) + // Minimum execution time: 17_830_000 picoseconds. + Weight::from_parts(18_411_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 25_691_000 picoseconds. 
- Weight::from_parts(25_971_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -108,18 +80,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 55_456_000 picoseconds. + Weight::from_parts(56_808_000, 0) + .saturating_add(Weight::from_parts(0, 3535)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `496` - // Estimated: `6208` - // Minimum execution time: 146_932_000 picoseconds. - Weight::from_parts(153_200_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(12)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
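+ // Editorial aside, not part of the generated file: this figure is the
+ // ref-time of `Weight::MAX` (u64::MAX = 18_446_744_073_709_551_615 ps,
+ // apparently rounded by the benchmark CLI). It is the sentinel emitted when
+ // a benchmark returns `BenchmarkError::Override`, pricing the call so it can
+ // never fit in a block; `transfer_assets` has no benchmarkable path here,
+ // presumably because reserve transfers are disabled on this runtime.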
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -128,189 +120,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_572_000 picoseconds. - Weight::from_parts(9_924_000, 0) + // Minimum execution time: 5_996_000 picoseconds. + Weight::from_parts(6_154_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_default_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_997_000 picoseconds. - Weight::from_parts(3_136_000, 0) + // Minimum execution time: 1_768_000 picoseconds. 
+ Weight::from_parts(1_914_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm QueryCounter (r:1 w:1) - /// Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_271_000 picoseconds. - Weight::from_parts(30_819_000, 0) + // Minimum execution time: 24_120_000 picoseconds. 
+ Weight::from_parts(24_745_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 32_302_000 picoseconds. - Weight::from_parts(32_807_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 26_630_000 picoseconds. + Weight::from_parts(27_289_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_960_000 picoseconds. - Weight::from_parts(3_094_000, 0) + // Minimum execution time: 1_821_000 picoseconds. 
+ Weight::from_parts(1_946_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_877_000 picoseconds. - Weight::from_parts(15_296_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 16_586_000 picoseconds. + Weight::from_parts(16_977_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifiers (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_835_000 picoseconds. - Weight::from_parts(15_115_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 16_923_000 picoseconds. + Weight::from_parts(17_415_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 15_368_000 picoseconds. - Weight::from_parts(15_596_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_596_000 picoseconds. 
+ Weight::from_parts(18_823_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 28_025_000 picoseconds. - Weight::from_parts(28_524_000, 0) + // Minimum execution time: 23_817_000 picoseconds. + Weight::from_parts(24_520_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_166_000 picoseconds. - Weight::from_parts(8_314_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_042_000 picoseconds. 
+ Weight::from_parts(11_578_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 14_871_000 picoseconds. - Weight::from_parts(15_374_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_306_000 picoseconds. + Weight::from_parts(17_817_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_611_000 picoseconds. - Weight::from_parts(34_008_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 32_141_000 picoseconds. 
+ Weight::from_parts(32_954_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -319,11 +311,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn new_query() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 5_496_000 picoseconds. - Weight::from_parts(5_652_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 3_410_000 picoseconds. + Weight::from_parts(3_556_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -331,11 +323,23 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn take_response() -> Weight { // Proof Size summary in bytes: - // Measured: `7740` - // Estimated: `11205` - // Minimum execution time: 26_140_000 picoseconds. - Weight::from_parts(26_824_000, 0) - .saturating_add(Weight::from_parts(0, 11205)) + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_021_000 picoseconds. + Weight::from_parts(25_240_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 33_801_000 picoseconds. + Weight::from_parts(34_655_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs index 311128a17ca9c16f18adf60e5dbf383161dc3bab..534d6519086205e9159f5064bbbcee2369c6f2b6 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs @@ -207,7 +207,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent and its pluralities (i.e. governance bodies) get free execution. 
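Every entry in these regenerated weight files follows the same recipe: a benchmarked base weight (ref-time plus the `Estimated` proof size) and one `T::DbWeight` charge per storage read and write listed in the `Storage:` comments. As a minimal sketch of how that composes, and not something contained in this diff, the snippet below re-evaluates the new people-rococo `claim_assets()` figures using the stock `RocksDbWeight` constants from `frame-support` as a stand-in; a production runtime substitutes its own benchmarked `T::DbWeight`.

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

// Mirrors the generated `claim_assets()` above: one read and one write of
// `PolkadotXcm::AssetTraps` on top of the measured base weight.
fn claim_assets_weight() -> Weight {
	// Base: 34_655_000 ps of ref-time plus the 3_555-byte `Estimated` PoV size.
	Weight::from_parts(34_655_000, 0)
		.saturating_add(Weight::from_parts(0, 3555))
		// r:1 w:1, charged at the stock RocksDB constants (an assumption):
		// 25_000_000 ps per read and 100_000_000 ps per write.
		.saturating_add(RocksDbWeight::get().reads(1))
		.saturating_add(RocksDbWeight::get().writes(1))
}
```

With those constants the two storage charges (125 µs combined) dwarf the roughly 35 µs of measured execution, which is why the r:/w: counts in the `Storage:` comments move these weights far more than the picosecond-level churn in the base figures.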
diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index a08b97021605046c250524f846cc17bbc5468a6e..e87e825a34e8d9ad8c7f41883f3cfbb826064b06 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -15,7 +15,7 @@ enumflags2 = { version = "0.7.7" } hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 4ff743aefe1f2d33bd10af390a1435bd2751fd55..2dc2d06a66b9f7ac543ace52997878cc84f9a36a 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -448,6 +448,7 @@ mod benches { [frame_system, SystemBench::] [pallet_balances, Balances] [pallet_identity, Identity] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] @@ -455,6 +456,7 @@ mod benches { // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM @@ -493,7 +495,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -684,6 +686,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -692,7 +700,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between People and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -702,6 +710,13 @@ impl_runtime_apis! 
{ fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index d3b60471b850e0fffbad01915aaf461be3d48ae0..c337289243b748d721b60421bccf3349044ef194 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=people-polkadot-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-polkadot/src/weights/pallet_xcm.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=people-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,57 +48,28 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_783_000 picoseconds. - Weight::from_parts(26_398_000, 0) + // Minimum execution time: 17_856_000 picoseconds. + Weight::from_parts(18_473_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 25_511_000 picoseconds. 
-		Weight::from_parts(26_120_000, 0)
-			.saturating_add(Weight::from_parts(0, 1489))
-			.saturating_add(T::DbWeight::get().reads(1))
-	}
-	/// Storage: Benchmark Override (r:0 w:0)
-	/// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured)
-	fn reserve_transfer_assets() -> Weight {
-		// Proof Size summary in bytes:
-		// Measured: `0`
-		// Estimated: `0`
-		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
-		Weight::from_parts(18_446_744_073_709_551_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
-	}
 	/// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	/// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Assets::Asset` (r:1 w:1)
-	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
-	/// Storage: `Assets::Account` (r:2 w:2)
-	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:2 w:2)
-	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
-	/// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
-	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
@@ -108,18 +80,38 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	fn teleport_assets() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `70`
+		// Estimated: `3535`
+		// Minimum execution time: 56_112_000 picoseconds.
+		Weight::from_parts(57_287_000, 0)
+			.saturating_add(Weight::from_parts(0, 3535))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	/// Storage: `Benchmark::Override` (r:0 w:0)
+	/// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn reserve_transfer_assets() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	/// Storage: `Benchmark::Override` (r:0 w:0)
+	/// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn transfer_assets() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `496`
-		// Estimated: `6208`
-		// Minimum execution time: 146_932_000 picoseconds.
-		Weight::from_parts(153_200_000, 0)
-			.saturating_add(Weight::from_parts(0, 6208))
-			.saturating_add(T::DbWeight::get().reads(12))
-			.saturating_add(T::DbWeight::get().writes(7))
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: Benchmark Override (r:0 w:0)
-	/// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Benchmark::Override` (r:0 w:0)
+	/// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn execute() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
@@ -128,189 +120,189 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		Weight::from_parts(18_446_744_073_709_551_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: PolkadotXcm SupportedVersion (r:0 w:1)
-	/// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
+	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn force_xcm_version() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 9_707_000 picoseconds.
-		Weight::from_parts(9_874_000, 0)
+		// Minimum execution time: 6_186_000 picoseconds.
+		Weight::from_parts(6_420_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1)
-	/// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
+	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1)
+	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn force_default_xcm_version() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 3_073_000 picoseconds.
-		Weight::from_parts(3_183_000, 0)
+		// Minimum execution time: 1_824_000 picoseconds.
+ Weight::from_parts(1_999_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm QueryCounter (r:1 w:1) - /// Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_999_000 picoseconds. - Weight::from_parts(31_641_000, 0) + // Minimum execution time: 23_833_000 picoseconds. 
+ Weight::from_parts(24_636_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 33_036_000 picoseconds. - Weight::from_parts(33_596_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(27_275_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_035_000 picoseconds. - Weight::from_parts(3_154_000, 0) + // Minimum execution time: 1_921_000 picoseconds. 
+ Weight::from_parts(2_040_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_805_000 picoseconds. - Weight::from_parts(15_120_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 16_832_000 picoseconds. + Weight::from_parts(17_312_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifiers (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_572_000 picoseconds. - Weight::from_parts(14_909_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 16_687_000 picoseconds. + Weight::from_parts(17_123_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 15_341_000 picoseconds. - Weight::from_parts(15_708_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_164_000 picoseconds. 
+ Weight::from_parts(18_580_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 27_840_000 picoseconds. - Weight::from_parts(28_248_000, 0) + // Minimum execution time: 23_577_000 picoseconds. + Weight::from_parts(24_324_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_245_000 picoseconds. - Weight::from_parts(8_523_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_014_000 picoseconds. 
+ Weight::from_parts(11_223_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 14_780_000 picoseconds. - Weight::from_parts(15_173_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 16_887_000 picoseconds. + Weight::from_parts(17_361_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_422_000 picoseconds. - Weight::from_parts(34_076_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 31_705_000 picoseconds. 
+		Weight::from_parts(32_166_000, 0)
+			.saturating_add(Weight::from_parts(0, 13471))
+			.saturating_add(T::DbWeight::get().reads(10))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
 	/// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1)
@@ -319,11 +311,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn new_query() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `103`
-		// Estimated: `1588`
-		// Minimum execution time: 5_496_000 picoseconds.
-		Weight::from_parts(5_652_000, 0)
-			.saturating_add(Weight::from_parts(0, 1588))
+		// Measured: `32`
+		// Estimated: `1517`
+		// Minimum execution time: 3_568_000 picoseconds.
+		Weight::from_parts(3_669_000, 0)
+			.saturating_add(Weight::from_parts(0, 1517))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -331,11 +323,23 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn take_response() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `7740`
-		// Estimated: `11205`
-		// Minimum execution time: 26_140_000 picoseconds.
-		Weight::from_parts(26_824_000, 0)
-			.saturating_add(Weight::from_parts(0, 11205))
+		// Measured: `7669`
+		// Estimated: `11134`
+		// Minimum execution time: 24_823_000 picoseconds.
+		Weight::from_parts(25_344_000, 0)
+			.saturating_add(Weight::from_parts(0, 11134))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1)
+	/// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn claim_assets() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `90`
+		// Estimated: `3555`
+		// Minimum execution time: 34_516_000 picoseconds.
+		Weight::from_parts(35_478_000, 0)
+			.saturating_add(Weight::from_parts(0, 3555))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs
index 4b7da91c17e5568bc4ca34f3f4a255f8276893e8..6353a58135258f75ff7be591b538e89e12231652 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs
@@ -214,7 +214,7 @@ pub type Barrier = TrailingSetTopicAsId<
 	AllowKnownQueryResponses<PolkadotXcm>,
 	WithComputedOrigin<
 		(
-			// If the message is one that immediately attemps to pay for execution, then
+			// If the message is one that immediately attempts to pay for execution, then
 			// allow it.
 			AllowTopLevelPaidExecutionFrom<Everything>,
 			// Parent, its pluralities (i.e. governance bodies), and the Fellows plurality
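A note on the `18_446_744_073_709_551_000` picosecond entries above: weight functions whose benchmark is backed by `Benchmark::Override` (here `reserve_transfer_assets`, `transfer_assets`, and `execute`) are pinned to an effectively unpayable weight so that the call can never fit into a block on a chain where it is unsupported. The constant appears to be `u64::MAX` rounded down to a whole nanosecond; that rounding is an assumption about the generator, but the arithmetic itself is easy to verify:

```rust
fn main() {
    // u64::MAX is 18_446_744_073_709_551_615 (picoseconds here).
    let sentinel: u64 = 18_446_744_073_709_551_000;
    // The override sentinel equals u64::MAX rounded down to the nearest 1_000 ps.
    assert_eq!(sentinel, (u64::MAX / 1_000) * 1_000);
}
```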
diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs
index ba077ef887947f6f0f2b639f85c10fcb13a06fcb..4cc0a81ef49a7ce96f74583a10a0d18506a79b28 100644
--- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs
+++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs
@@ -300,7 +300,7 @@ impl_runtime_apis! {
 		Executive::execute_block(block)
 	}

-	fn initialize_block(header: &<Block as BlockT>::Header) {
+	fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 		Executive::initialize_block(header)
 	}
 }
diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs
index 457394760d989db4dcd5ba335e1ff2dad46eaaba..829754731a421f1d95e8c207180d159999f9898b 100644
--- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs
+++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs
@@ -357,7 +357,7 @@ impl_runtime_apis! {
 		Executive::execute_block(block)
 	}

-	fn initialize_block(header: &<Block as BlockT>::Header) {
+	fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 		Executive::initialize_block(header)
 	}
 }
diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml
index a61e05de13fa6dff6e54f583a00683d4a270e429..eda88beb7dabb41bd4075ec5ab6bf8ec2f42d3c8 100644
--- a/cumulus/parachains/runtimes/test-utils/Cargo.toml
+++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml
@@ -17,6 +17,7 @@ frame-support = { path = "../../../../substrate/frame/support", default-features
 frame-system = { path = "../../../../substrate/frame/system", default-features = false }
 pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false }
 pallet-session = { path = "../../../../substrate/frame/session", default-features = false }
+pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false }
 sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura", default-features = false }
 sp-io = { path = "../../../../substrate/primitives/io", default-features = false }
 sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false }
@@ -59,6 +60,7 @@ std = [
 	"pallet-balances/std",
 	"pallet-collator-selection/std",
 	"pallet-session/std",
+	"pallet-timestamp/std",
 	"pallet-xcm/std",
 	"parachain-info/std",
 	"polkadot-parachain-primitives/std",
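The `initialize_block` change repeated across these runtimes tracks a change to the `Core` runtime API: `initialize_block` now returns `sp_runtime::ExtrinsicInclusionMode`, letting the runtime tell the block builder whether user extrinsics may be included in the block being built. The sketch below restates the idea in a self-contained form; the variant names are taken from `sp_runtime`, while the helper function is hypothetical:

```rust
// Local mirror of sp_runtime::ExtrinsicInclusionMode, so the sketch compiles on its own.
#[derive(Debug, PartialEq)]
enum ExtrinsicInclusionMode {
    /// Business as usual: user extrinsics are allowed.
    AllExtrinsics,
    /// Only inherents may be applied, e.g. while a multi-block migration runs.
    OnlyInherents,
}

/// Hypothetical block-builder check: only pull transactions from the pool
/// when the runtime allows them in this block.
fn should_pull_transactions(mode: &ExtrinsicInclusionMode) -> bool {
    *mode == ExtrinsicInclusionMode::AllExtrinsics
}

fn main() {
    assert!(should_pull_transactions(&ExtrinsicInclusionMode::AllExtrinsics));
    assert!(!should_pull_transactions(&ExtrinsicInclusionMode::OnlyInherents));
}
```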
diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs
index b4eb57fcb66f412cd697cfd4d6efd1f551ef7eb6..e62daa16a1256fa993a97d82d4f6df96f261018b 100644
--- a/cumulus/parachains/runtimes/test-utils/src/lib.rs
+++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs
@@ -34,7 +34,7 @@ use polkadot_parachain_primitives::primitives::{
 };
 use sp_consensus_aura::{SlotDuration, AURA_ENGINE_ID};
 use sp_core::{Encode, U256};
-use sp_runtime::{traits::Header, BuildStorage, Digest, DigestItem};
+use sp_runtime::{traits::Header, BuildStorage, Digest, DigestItem, SaturatedConversion};
 use xcm::{
 	latest::{Asset, Location, XcmContext, XcmHash},
 	prelude::*,
@@ -129,6 +129,7 @@ pub trait BasicParachainRuntime:
 	+ parachain_info::Config
 	+ pallet_collator_selection::Config
 	+ cumulus_pallet_parachain_system::Config
+	+ pallet_timestamp::Config
 {
 }
@@ -140,7 +141,8 @@ where
 		+ pallet_xcm::Config
 		+ parachain_info::Config
 		+ pallet_collator_selection::Config
-		+ cumulus_pallet_parachain_system::Config,
+		+ cumulus_pallet_parachain_system::Config
+		+ pallet_timestamp::Config,
 	ValidatorIdOf<T>: From<AccountIdOf<T>>,
 {
 }
@@ -259,8 +261,10 @@ pub struct RuntimeHelper<Runtime, AllPalletsWithoutSystem = ()>(
 );
 /// Utility function that advances the chain to the desired block number.
 /// If an author is provided, that author information is injected to all the blocks in the meantime.
-impl<Runtime: frame_system::Config + cumulus_pallet_parachain_system::Config, AllPalletsWithoutSystem>
-	RuntimeHelper<Runtime, AllPalletsWithoutSystem>
+impl<
+		Runtime: frame_system::Config + cumulus_pallet_parachain_system::Config + pallet_timestamp::Config,
+		AllPalletsWithoutSystem,
+	> RuntimeHelper<Runtime, AllPalletsWithoutSystem>
 where
 	AccountIdOf<Runtime>:
 		Into<<<Runtime as frame_system::Config>::RuntimeOrigin as OriginTrait>::AccountId>,
@@ -296,6 +300,65 @@ where
 		last_header.expect("run_to_block empty block range")
 	}

+	pub fn run_to_block_with_finalize(n: u32) -> HeaderFor<Runtime> {
+		let mut last_header = None;
+		loop {
+			let block_number = frame_system::Pallet::<Runtime>::block_number();
+			if block_number >= n.into() {
+				break
+			}
+			// Set the new block number and author
+			let header = frame_system::Pallet::<Runtime>::finalize();
+
+			let pre_digest = Digest {
+				logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, block_number.encode())],
+			};
+			frame_system::Pallet::<Runtime>::reset_events();
+
+			let next_block_number = block_number + 1u32.into();
+			frame_system::Pallet::<Runtime>::initialize(
+				&next_block_number,
+				&header.hash(),
+				&pre_digest,
+			);
+			AllPalletsWithoutSystem::on_initialize(next_block_number);
+
+			let parent_head = HeadData(header.encode());
+			let sproof_builder = RelayStateSproofBuilder {
+				para_id: <Runtime as cumulus_pallet_parachain_system::Config>::SelfParaId::get(),
+				included_para_head: parent_head.clone().into(),
+				..Default::default()
+			};
+
+			let (relay_parent_storage_root, relay_chain_state) =
+				sproof_builder.into_state_root_and_proof();
+			let inherent_data = ParachainInherentData {
+				validation_data: PersistedValidationData {
+					parent_head,
+					relay_parent_number: (block_number.saturated_into::<u32>() * 2 + 1).into(),
+					relay_parent_storage_root,
+					max_pov_size: 100_000_000,
+				},
+				relay_chain_state,
+				downward_messages: Default::default(),
+				horizontal_messages: Default::default(),
+			};
+
+			let _ = cumulus_pallet_parachain_system::Pallet::<Runtime>::set_validation_data(
+				Runtime::RuntimeOrigin::none(),
+				inherent_data,
+			);
+			let _ = pallet_timestamp::Pallet::<Runtime>::set(
+				Runtime::RuntimeOrigin::none(),
+				300_u32.into(),
+			);
+			AllPalletsWithoutSystem::on_finalize(next_block_number);
+			let header = frame_system::Pallet::<Runtime>::finalize();
+			last_header = Some(header);
+		}
+		last_header.expect("run_to_block empty block range")
+	}
+
 	pub fn root_origin() -> <Runtime as frame_system::Config>::RuntimeOrigin {
 		<Runtime as frame_system::Config>::RuntimeOrigin::root()
 	}
diff --git a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs
index f78bf9877ec2443a9ce3d754a533f3ee6a3cf14a..1c58df189b673af60bc3e4a9ae7391294287e2aa 100644
--- a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs
+++ b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs
@@ -37,7 +37,8 @@ pub fn change_storage_constant_by_governance_works<Runtime, StorageConstant, StorageConstantType>(
-		+ cumulus_pallet_parachain_system::Config,
+		+ cumulus_pallet_parachain_system::Config
+		+ pallet_timestamp::Config,
 	ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
 	StorageConstant: Get<StorageConstantType>,
 	StorageConstantType: Encode + PartialEq + std::fmt::Debug,
@@ -107,7 +108,8 @@ pub fn set_storage_keys_by_governance_works(
 		+ pallet_xcm::Config
 		+ parachain_info::Config
 		+ pallet_collator_selection::Config
-		+ cumulus_pallet_parachain_system::Config,
+		+ cumulus_pallet_parachain_system::Config
+		+ pallet_timestamp::Config,
 	ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
 {
 	let mut runtime = ExtBuilder::<Runtime>::default()
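The new `run_to_block_with_finalize` helper above drives the full block lifecycle for every block it produces, including the `set_validation_data` and timestamp inherents and `on_finalize` for all pallets, which plain `run_to_block` does not do. A hypothetical test using it could look like this, assuming a concrete `Runtime`, its `AllPalletsWithoutSystem` tuple, and the `ExtBuilder` from these test-utils:

```rust
// Sketch only: `Runtime` and `AllPalletsWithoutSystem` are assumed to come
// from a concrete parachain runtime under test.
#[test]
fn advances_and_finalizes_five_blocks() {
    ExtBuilder::<Runtime>::default().build().execute_with(|| {
        let header =
            RuntimeHelper::<Runtime, AllPalletsWithoutSystem>::run_to_block_with_finalize(5);
        // Each produced block went through on_initialize, the inherents and on_finalize.
        assert_eq!(*header.number(), 5u32.into());
    });
}
```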
diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml
index 08e5987d43afd5cf2676e56dee79afa04362ada0..7f3e898d923d226e7bc5ff5fc3047eda11942bcc 100644
--- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml
+++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml
@@ -77,7 +77,6 @@ cumulus-primitives-utility = { path = "../../../../primitives/utility", default-
 pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false }
 parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false }
 parachains-common = { path = "../../../common", default-features = false }
-testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] }
 assets-common = { path = "../../assets/common", default-features = false }

 [features]
@@ -133,7 +132,6 @@ std = [
 	"sp-transaction-pool/std",
 	"sp-version/std",
 	"substrate-wasm-builder",
-	"testnet-parachains-constants/std",
 	"xcm-builder/std",
 	"xcm-executor/std",
 	"xcm/std",
diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
index bf8dcbc24c8d51d61c70ea6a56fa8865ea32c893..0030287edb3b6c5c087c65f2507a62b6d2de3e08 100644
--- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
@@ -699,7 +699,7 @@ impl_runtime_apis! {
 		Executive::execute_block(block)
 	}

-	fn initialize_block(header: &<Block as BlockT>::Header) {
+	fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 		Executive::initialize_block(header)
 	}
 }
diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs
index c0bcd78796261504fd6496cb8b5e64fed8d235c4..84b8fd9bb0a361fb04ec8cf14fd76caf5b4fcf5b 100644
--- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs
@@ -43,7 +43,6 @@ use pallet_xcm::XcmPassthrough;
 use polkadot_parachain_primitives::primitives::Sibling;
 use polkadot_runtime_common::impls::ToAuthor;
 use sp_runtime::traits::Zero;
-use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork;
 use xcm::latest::prelude::*;
 use xcm_builder::{
 	AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses,
@@ -134,7 +133,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter<
 	ForeignAssets,
 	// Use this currency when it is a fungible asset matching the given location or name:
 	ForeignAssetsConvertedConcreteId,
-	// Convert an XCM MultiLocation into a local account id:
+	// Convert an XCM Location into a local account id:
 	LocationToAccountId,
 	// Our chain's account ID type (we can't get away without mentioning it explicitly):
 	AccountId,
@@ -295,7 +294,13 @@ parameter_types! {
 		0,
 		[xcm::v3::Junction::PalletInstance(50), xcm::v3::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into())]
 	);
-	pub EthereumLocation: Location = Location::new(2, [GlobalConsensus(EthereumNetwork::get())]);
+
+	/// The Penpal runtime is utilized for testing with various environment setups.
+	/// This storage item provides the opportunity to customize testing scenarios
+	/// by configuring the trusted asset from the `SystemAssetHub`.
+	///
+	/// By default, it is configured as a `SystemAssetHubLocation` and can be modified using `System::set_storage`.
+	pub storage CustomizableAssetFromSystemAssetHub: Location = SystemAssetHubLocation::get();
 }

 /// Accepts asset with ID `AssetLocation` and is coming from `Origin` chain.
@@ -310,11 +315,11 @@ impl<AssetLocation: Get<Location>, Origin: Get<Location>> ContainsPair<Asset, Location>
 	NativeAssetFrom<SystemAssetHubLocation>,
-	AssetPrefixFrom<EthereumLocation, SystemAssetHubLocation>,
+	AssetPrefixFrom<CustomizableAssetFromSystemAssetHub, SystemAssetHubLocation>,
 );

 pub type TrustedTeleporters = (AssetFromChain<LocalTeleportableToAssetHub, SystemAssetHubLocation>,);
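Because `CustomizableAssetFromSystemAssetHub` is declared with `pub storage`, its value lives in runtime storage rather than in the Wasm blob, so a test can repoint Penpal's trusted-asset prefix without rebuilding the runtime. A sketch, assuming the `set`/`get` helpers that `parameter_types!` generates for storage-backed parameters and an externalities context:

```rust
use xcm::latest::prelude::*;

// Hypothetical test setup (must run inside TestExternalities): trust assets
// prefixed by an Ethereum consensus location instead of the Asset Hub default.
fn repoint_trusted_prefix() {
    let ethereum = Location::new(2, [GlobalConsensus(Ethereum { chain_id: 1 })]);
    // `set`/`get` are assumed to be the storage-parameter helpers generated
    // by `parameter_types!`; verify against frame-support before relying on this.
    CustomizableAssetFromSystemAssetHub::set(&ethereum);
    assert_eq!(CustomizableAssetFromSystemAssetHub::get(), ethereum);
}
```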
@@ -326,7 +331,7 @@ impl xcm_executor::Config for XcmConfig {
 	// How to withdraw and deposit an asset.
 	type AssetTransactor = AssetTransactors;
 	type OriginConverter = XcmOriginToTransactDispatchOrigin;
-	type IsReserve = Reserves;
+	type IsReserve = TrustedReserves;
 	// no teleport trust established with other chains
 	type IsTeleporter = TrustedTeleporters;
 	type UniversalLocation = UniversalLocation;
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
index 57969d9a4f1882d218fdc97374f52f467f109398..2b21a12c3ca40b2ec4406fa42bde71bd61cd8aed 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
@@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("test-parachain"),
 	impl_name: create_runtime_str!("test-parachain"),
 	authoring_version: 1,
-	spec_version: 1_007_000,
+	spec_version: 1_008_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 6,
@@ -689,7 +689,7 @@ impl_runtime_apis! {
 		Executive::execute_block(block);
 	}

-	fn initialize_block(header: &<Block as BlockT>::Header) {
+	fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 		Executive::initialize_block(header)
 	}
 }
diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml
index d5624941c037e22235e765c011136cf07eff6a05..84a232e954fc74a3dd4aef36750ef311794e72e5 100644
--- a/cumulus/polkadot-parachain/Cargo.toml
+++ b/cumulus/polkadot-parachain/Cargo.toml
@@ -21,8 +21,8 @@ codec = { package = "parity-scale-codec", version = "3.0.0" }
 futures = "0.3.28"
 hex-literal = "0.4.1"
 log = { workspace = true, default-features = true }
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }

 # Local
 rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" }
diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs
index 1db826ea7daf82e93c0099807bab7f44642084b3..15e8a1bf11a055e6c5b4950ad07d78cad72f81a8 100644
--- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs
+++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs
@@ -25,7 +25,10 @@ use std::str::FromStr;
 #[derive(Debug, PartialEq)]
 pub enum BridgeHubRuntimeType {
 	Kusama,
+	KusamaLocal,
+
 	Polkadot,
+	PolkadotLocal,

 	Rococo,
 	RococoLocal,
@@ -44,7 +47,9 @@ impl FromStr for BridgeHubRuntimeType {
 	fn from_str(value: &str) -> Result<Self, Self::Err> {
 		match value {
 			polkadot::BRIDGE_HUB_POLKADOT => Ok(BridgeHubRuntimeType::Polkadot),
+			polkadot::BRIDGE_HUB_POLKADOT_LOCAL => Ok(BridgeHubRuntimeType::PolkadotLocal),
 			kusama::BRIDGE_HUB_KUSAMA => Ok(BridgeHubRuntimeType::Kusama),
+			kusama::BRIDGE_HUB_KUSAMA_LOCAL => Ok(BridgeHubRuntimeType::KusamaLocal),
 			westend::BRIDGE_HUB_WESTEND => Ok(BridgeHubRuntimeType::Westend),
 			westend::BRIDGE_HUB_WESTEND_LOCAL => Ok(BridgeHubRuntimeType::WestendLocal),
 			westend::BRIDGE_HUB_WESTEND_DEVELOPMENT => Ok(BridgeHubRuntimeType::WestendDevelopment),
@@ -103,6 +108,7 @@ impl BridgeHubRuntimeType {
 				Some("Bob".to_string()),
 				|_| (),
 			))),
+			other => Err(std::format!("No default config present for {:?}", other)),
 		}
 	}
 }
@@ -242,6 +248,7 @@ pub mod rococo {
 /// Sub-module for Kusama setup
 pub mod kusama {
 	pub(crate) const BRIDGE_HUB_KUSAMA: &str = "bridge-hub-kusama";
+	pub(crate) const BRIDGE_HUB_KUSAMA_LOCAL: &str = "bridge-hub-kusama-local";
 }
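The new `-local` runtime identifiers go through the same `FromStr` path as the existing ones, and the `#[derive(Debug, PartialEq)]` on `BridgeHubRuntimeType` makes them straightforward to assert on. A small sanity check (the test itself is illustrative, not part of this diff):

```rust
#[test]
fn local_bridge_hub_ids_parse() {
    use std::str::FromStr;

    assert_eq!(
        BridgeHubRuntimeType::from_str("bridge-hub-kusama-local"),
        Ok(BridgeHubRuntimeType::KusamaLocal)
    );
    assert_eq!(
        BridgeHubRuntimeType::from_str("bridge-hub-polkadot-local"),
        Ok(BridgeHubRuntimeType::PolkadotLocal)
    );
}
```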
/// Sub-module for Westend setup. @@ -358,4 +365,5 @@ pub mod westend { /// Sub-module for Polkadot setup pub mod polkadot { pub(crate) const BRIDGE_HUB_POLKADOT: &str = "bridge-hub-polkadot"; + pub(crate) const BRIDGE_HUB_POLKADOT_LOCAL: &str = "bridge-hub-polkadot-local"; } diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index acba32f048bec53f36d21c1437549f77834bb51a..9ba7b7876b3ee06f0d61c626d4fa3c234e313ff9 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -23,6 +23,7 @@ use crate::{ }, service::{new_partial, Block}, }; +use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; @@ -398,7 +399,7 @@ macro_rules! construct_partials { Runtime::AssetHubPolkadot => { let $partials = new_partial::( &$config, - crate::service::aura_build_import_queue::<_, AssetHubPolkadotAuraId>, + crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, )?; $code }, @@ -412,28 +413,21 @@ macro_rules! construct_partials { Runtime::People(_) => { let $partials = new_partial::( &$config, - crate::service::aura_build_import_queue::<_, AuraId>, + crate::service::build_relay_to_aura_import_queue::<_, AuraId>, )?; $code }, Runtime::GluttonWestend | Runtime::Glutton | Runtime::Shell | Runtime::Seedling => { let $partials = new_partial::( &$config, - crate::service::shell_build_import_queue, + crate::service::build_shell_import_queue, )?; $code }, - Runtime::ContractsRococo => { + Runtime::ContractsRococo | Runtime::Penpal(_) | Runtime::Default => { let $partials = new_partial::( &$config, - crate::service::contracts_rococo_build_import_queue, - )?; - $code - }, - Runtime::Penpal(_) | Runtime::Default => { - let $partials = new_partial::( - &$config, - crate::service::rococo_parachain_build_import_queue, + crate::service::build_aura_import_queue, )?; $code }, @@ -449,7 +443,7 @@ macro_rules! construct_async_run { runner.async_run(|$config| { let $components = new_partial::( &$config, - crate::service::aura_build_import_queue::<_, AssetHubPolkadotAuraId>, + crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) @@ -466,7 +460,7 @@ macro_rules! construct_async_run { runner.async_run(|$config| { let $components = new_partial::( &$config, - crate::service::aura_build_import_queue::<_, AuraId>, + crate::service::build_relay_to_aura_import_queue::<_, AuraId>, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) @@ -479,30 +473,20 @@ macro_rules! 
construct_async_run { runner.async_run(|$config| { let $components = new_partial::( &$config, - crate::service::shell_build_import_queue, + crate::service::build_shell_import_queue, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) }) } - Runtime::ContractsRococo => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::contracts_rococo_build_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Penpal(_) | Runtime::Default => { + Runtime::ContractsRococo | Runtime::Penpal(_) | Runtime::Default => { runner.async_run(|$config| { let $components = new_partial::< RuntimeApi, _, >( &$config, - crate::service::rococo_parachain_build_import_queue, + crate::service::build_aura_import_queue, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) @@ -584,7 +568,7 @@ pub fn run() -> Result<()> { match cmd { BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run::, ()>(config)) + runner.sync_run(|config| cmd.run::, ReclaimHostFunctions>(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." @@ -714,25 +698,19 @@ pub fn run() -> Result<()> { .map_err(Into::into), CollectivesPolkadot => - crate::service::start_generic_aura_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), CollectivesWestend => - crate::service::start_generic_aura_lookahead_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Seedling | Shell => - crate::service::start_shell_node::( + crate::service::start_shell_node( config, polkadot_config, collator_options, @@ -755,36 +733,26 @@ pub fn run() -> Result<()> { .map_err(Into::into), BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot => - crate::service::start_generic_aura_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | + chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal => + crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama => - crate::service::start_generic_aura_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | + chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal => + crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => - crate::service::start_generic_aura_lookahead_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_lookahead_node(config, polkadot_config, 
collator_options, id, hwbench)
 					.await
 					.map(|r| r.0),
 				chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo |
 				chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal |
 				chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment =>
-					crate::service::start_generic_aura_lookahead_node::<
-						RuntimeApi,
-						AuraId,
-					>(config, polkadot_config, collator_options, id, hwbench)
+					crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
 					.await
 					.map(|r| r.0),
 			}
@@ -797,10 +765,7 @@ pub fn run() -> Result<()> {
 				chain_spec::coretime::CoretimeRuntimeType::Westend |
 				chain_spec::coretime::CoretimeRuntimeType::WestendLocal |
 				chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment =>
-					crate::service::start_generic_aura_lookahead_node::<
-						RuntimeApi,
-						AuraId,
-					>(config, polkadot_config, collator_options, id, hwbench)
+					crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
 					.await
 					.map(|r| r.0),
 			}
@@ -819,10 +784,7 @@ pub fn run() -> Result<()> {
 			.map_err(Into::into),

 		Glutton | GluttonWestend =>
-			crate::service::start_basic_lookahead_node::<
-				RuntimeApi,
-				AuraId,
-			>(config, polkadot_config, collator_options, id, hwbench)
+			crate::service::start_basic_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
 			.await
 			.map(|r| r.0)
 			.map_err(Into::into),
@@ -834,10 +796,7 @@ pub fn run() -> Result<()> {
 				chain_spec::people::PeopleRuntimeType::Westend |
 				chain_spec::people::PeopleRuntimeType::WestendLocal |
 				chain_spec::people::PeopleRuntimeType::WestendDevelopment =>
-					crate::service::start_generic_aura_lookahead_node::<
-						RuntimeApi,
-						AuraId,
-					>(config, polkadot_config, collator_options, id, hwbench)
+					crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
 					.await
 					.map(|r| r.0),
 			}
diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs
index 76dd7347ccbc35127747e144af7c8705a15022e4..7778d1bf7d2dc0187fe6a0684023a9a4648be596 100644
--- a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs
+++ b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs
@@ -39,7 +39,7 @@ sp_api::impl_runtime_apis! {
 		unimplemented!()
 	}

-	fn initialize_block(_: &<Block as BlockT>::Header) {
+	fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 		unimplemented!()
 	}
 }
diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs
index 0f01b85ebcf6fa63aabd9115c2ef553c18badea8..880f5d760c74559db87597bce158f8f5e62dbd82 100644
--- a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs
+++ b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs
@@ -39,7 +39,7 @@ sp_api::impl_runtime_apis! {
 		unimplemented!()
 	}

-	fn initialize_block(_: &<Block as BlockT>::Header) {
+	fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 		unimplemented!()
 	}
 }
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index 553975b01a80dd379f673b3ee15f22b7eb9e07ea..ddf595ca70c1d675aa66a94daf21036cc8a3295e 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -36,12 +36,13 @@ use cumulus_primitives_core::{
 	ParaId,
 };
 use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
+use sc_rpc::DenyUnsafe;
 use sp_core::Pair;

 use jsonrpsee::RpcModule;

-use crate::{fake_runtime_api::aura::RuntimeApi, rpc};
-pub use parachains_common::{AccountId, Balance, Block, Hash, Header, Nonce};
+use crate::{fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, rpc};
+pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Header, Nonce};

 use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
 use futures::{lock::Mutex, prelude::*};
@@ -68,11 +69,15 @@ use substrate_prometheus_endpoint::Registry;
 use polkadot_primitives::CollatorPair;

 #[cfg(not(feature = "runtime-benchmarks"))]
-type HostFunctions = sp_io::SubstrateHostFunctions;
+type HostFunctions =
+	(sp_io::SubstrateHostFunctions, cumulus_client_service::storage_proof_size::HostFunctions);

 #[cfg(feature = "runtime-benchmarks")]
-type HostFunctions =
-	(sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions);
+type HostFunctions = (
+	sp_io::SubstrateHostFunctions,
+	cumulus_client_service::storage_proof_size::HostFunctions,
+	frame_benchmarking::benchmarking::HostFunctions,
+);

 type ParachainClient<RuntimeApi> = TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
@@ -81,141 +86,6 @@ type ParachainBackend = TFullBackend<Block>;
 type ParachainBlockImport<RuntimeApi> =
 	TParachainBlockImport<Block, Arc<ParachainClient<RuntimeApi>>, ParachainBackend>;

-/// Native executor instance.
-pub struct ShellRuntimeExecutor;
-
-impl sc_executor::NativeExecutionDispatch for ShellRuntimeExecutor {
-	type ExtendHostFunctions = ();
-
-	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
-		shell_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		shell_runtime::native_version()
-	}
-}
-
-/// Native Asset Hub Westend (Westmint) executor instance.
-pub struct AssetHubWestendExecutor;
-impl sc_executor::NativeExecutionDispatch for AssetHubWestendExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-
-	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
-		asset_hub_westend_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		asset_hub_westend_runtime::native_version()
-	}
-}
-
-/// Native Westend Collectives executor instance.
-pub struct CollectivesWestendRuntimeExecutor;
-
-impl sc_executor::NativeExecutionDispatch for CollectivesWestendRuntimeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-
-	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
-		collectives_westend_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		collectives_westend_runtime::native_version()
-	}
-}
-
-/// Native BridgeHubRococo executor instance.
-pub struct BridgeHubRococoRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for BridgeHubRococoRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - bridge_hub_rococo_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - bridge_hub_rococo_runtime::native_version() - } -} - -/// Native `CoretimeRococo` executor instance. -pub struct CoretimeRococoRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for CoretimeRococoRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - fn dispatch(method: &str, data: &[u8]) -> Option> { - coretime_rococo_runtime::api::dispatch(method, data) - } - fn native_version() -> sc_executor::NativeVersion { - coretime_rococo_runtime::native_version() - } -} - -/// Native `CoretimeWestend` executor instance. -pub struct CoretimeWestendRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for CoretimeWestendRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - fn dispatch(method: &str, data: &[u8]) -> Option> { - coretime_westend_runtime::api::dispatch(method, data) - } - fn native_version() -> sc_executor::NativeVersion { - coretime_westend_runtime::native_version() - } -} - -/// Native contracts executor instance. -pub struct ContractsRococoRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for ContractsRococoRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - contracts_rococo_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - contracts_rococo_runtime::native_version() - } -} - -/// Native Westend Glutton executor instance. -pub struct GluttonWestendRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for GluttonWestendRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - glutton_westend_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - glutton_westend_runtime::native_version() - } -} - -/// Native `PeopleWestend` executor instance. -pub struct PeopleWestendRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for PeopleWestendRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - fn dispatch(method: &str, data: &[u8]) -> Option> { - people_westend_runtime::api::dispatch(method, data) - } - fn native_version() -> sc_executor::NativeVersion { - people_westend_runtime::native_version() - } -} - -/// Native `PeopleRococo` executor instance. 
-pub struct PeopleRococoRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for PeopleRococoRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - fn dispatch(method: &str, data: &[u8]) -> Option> { - people_rococo_runtime::api::dispatch(method, data) - } - fn native_version() -> sc_executor::NativeVersion { - people_rococo_runtime::native_version() - } -} - /// Assembly of PartialComponents (enough to run chain ops subcommands) pub type Service = PartialComponents< ParachainClient, @@ -274,10 +144,11 @@ where .build(); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( + sc_service::new_full_parts_record_import::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + true, )?; let client = Arc::new(client); @@ -318,12 +189,11 @@ where }) } -/// Start a shell node with the given parachain `Configuration` and relay chain `Configuration`. +/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// -/// This is the actual implementation that is abstract over the executor and the runtime api for -/// shell nodes. +/// This is the actual implementation that is abstract over the executor and the runtime api. #[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_shell_node_impl( +async fn start_node_impl( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, @@ -342,8 +212,15 @@ where + sp_api::ApiExt + sp_offchain::OffchainWorkerApi + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo, - RB: Fn(Arc>) -> Result, sc_service::Error> + + cumulus_primitives_core::CollectCollationInfo + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + frame_rpc_system::AccountNonceApi, + RB: Fn( + DenyUnsafe, + Arc>, + Arc, + Arc>>, + ) -> Result, sc_service::Error> + 'static, BIQ: FnOnce( Arc>, @@ -367,6 +244,7 @@ where CollatorPair, OverseerHandle, Arc>) + Send + Sync>, + Arc, ) -> Result<(), sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); @@ -378,7 +256,6 @@ where let backend = params.backend.clone(); let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( polkadot_config, ¶chain_config, @@ -410,8 +287,20 @@ where }) .await?; - let rpc_client = client.clone(); - let rpc_builder = Box::new(move |_, _| rpc_ext_builder(rpc_client.clone())); + let rpc_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend_for_rpc = backend.clone(); + + Box::new(move |deny_unsafe, _| { + rpc_ext_builder( + deny_unsafe, + client.clone(), + backend_for_rpc.clone(), + transaction_pool.clone(), + ) + }) + }; sc_service::spawn_tasks(sc_service::SpawnTasksParams { rpc_builder, @@ -488,6 +377,7 @@ where collator_key.expect("Command line arguments do not allow this. qed"), overseer_handle, announce_block, + backend.clone(), )?; } @@ -496,1148 +386,248 @@ where Ok((task_manager, client)) } -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( +/// Build the import queue for Aura-based runtimes. 
+pub fn build_aura_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry: Option, + task_manager: &TaskManager, +) -> Result, sc_service::Error> { + let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; + + cumulus_client_consensus_aura::import_queue::< + sp_consensus_aura::sr25519::AuthorityPair, + _, + _, + _, + _, + _, + >(cumulus_client_consensus_aura::ImportQueueParams { + block_import, + client, + create_inherent_data_providers: move |_, _| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp)) + }, + registry: config.prometheus_registry(), + spawner: &task_manager.spawn_essential_handle(), + telemetry, + }) + .map_err(Into::into) +} + +/// Start a rococo parachain node. +pub async fn start_rococo_parachain_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, - sybil_resistance_level: CollatorSybilResistance, para_id: ParaId, - _rpc_ext_builder: RB, - build_import_queue: BIQ, - start_consensus: SC, hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( + parachain_config, + polkadot_config, + collator_options, + CollatorSybilResistance::Resistant, // Aura + para_id, + build_parachain_rpc_extensions::, + build_aura_import_queue, + start_lookahead_aura_consensus, + hwbench, + ) + .await +} + +/// Build the import queue for the shell runtime. +pub fn build_shell_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + _: Option, + task_manager: &TaskManager, +) -> Result, sc_service::Error> { + cumulus_client_consensus_relay_chain::import_queue( + client, + block_import, + |_, _| async { Ok(()) }, + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + ) + .map_err(Into::into) +} + +fn build_parachain_rpc_extensions( + deny_unsafe: sc_rpc::DenyUnsafe, + client: Arc>, + backend: Arc, + pool: Arc>>, +) -> Result, sc_service::Error> where RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + frame_rpc_system::AccountNonceApi, - RB: Fn(Arc>) -> Result, sc_service::Error>, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, - SC: FnOnce( - Arc>, - ParachainBlockImport, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - Arc>, - KeystorePtr, - Duration, - ParaId, - CollatorPair, - OverseerHandle, - Arc>) + Send + Sync>, - Arc, - ) -> Result<(), sc_service::Error>, { - let parachain_config = prepare_node_config(parachain_config); + let deps = rpc::FullDeps { client, pool, deny_unsafe }; - let params = new_partial::(¶chain_config, build_import_queue)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; + rpc::create_full(deps, backend).map_err(Into::into) +} - let client = params.client.clone(); - let backend = params.backend.clone(); +fn build_contracts_rpc_extensions( + deny_unsafe: 
sc_rpc::DenyUnsafe, + client: Arc>, + _backend: Arc, + pool: Arc>>, +) -> Result, sc_service::Error> { + let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( + crate::rpc::create_contracts_rococo(deps).map_err(Into::into) +} + +/// Start a polkadot-shell parachain node. +pub async fn start_shell_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( + parachain_config, polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), + collator_options, + CollatorSybilResistance::Unresistant, // free-for-all consensus + para_id, + |_, _, _, _| Ok(RpcModule::new(())), + build_shell_import_queue, + start_relay_chain_consensus, + hwbench, ) .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::new(¶chain_config.network); +} - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level, - }) - .await?; +enum BuildOnAccess { + Uninitialized(Option R + Send + Sync>>), + Initialized(R), +} - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); +impl BuildOnAccess { + fn get_mut(&mut self) -> &mut R { + loop { + match self { + Self::Uninitialized(f) => { + *self = Self::Initialized((f.take().unwrap())()); + }, + Self::Initialized(ref mut r) => return r, + } + } + } +} - let backend_for_rpc = backend.clone(); - Box::new(move |deny_unsafe, _| { - let deps = rpc::FullDeps { - client: client.clone(), - pool: transaction_pool.clone(), - deny_unsafe, - }; +/// Special [`ParachainConsensus`] implementation that waits for the upgrade from +/// shell to a parachain runtime that implements Aura. 
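+///
+/// On every `produce_candidate` call it queries `AuraApi::has_api` at the parent block: once
+/// the upgraded runtime advertises Aura, candidates come from the lazily built Aura consensus,
+/// otherwise from the relay-chain consensus. Condensed from the implementation below:
+///
+/// ```ignore
+/// if client.runtime_api().has_api::<dyn AuraApi<Block, AuraId>>(parent.hash()).unwrap_or(false) {
+///     aura_consensus.lock().await.get_mut().produce_candidate(parent, relay_parent, validation_data).await
+/// } else {
+///     relay_chain_consensus.lock().await.produce_candidate(parent, relay_parent, validation_data).await
+/// }
+/// ```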
+struct WaitForAuraConsensus { + client: Arc, + aura_consensus: Arc>>>>, + relay_chain_consensus: Arc>>>, + _phantom: PhantomData, +} - rpc::create_full(deps, backend_for_rpc.clone()).map_err(Into::into) - }) - }; - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); +impl Clone for WaitForAuraConsensus { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + aura_consensus: self.aura_consensus.clone(), + relay_chain_consensus: self.relay_chain_consensus.clone(), + _phantom: PhantomData, } } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), - para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; - - if validator { - start_consensus( - client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), - )?; - } - - start_network.start_network(); - - Ok((task_manager, client)) } -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. -/// -/// This node is basic in the sense that it doesn't support functionality like transaction -/// payment. Intended to replace start_shell_node in use for glutton, shell, and seedling. 
-#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_basic_lookahead_node_impl( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - sybil_resistance_level: CollatorSybilResistance, - para_id: ParaId, - rpc_ext_builder: RB, - build_import_queue: BIQ, - start_consensus: SC, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +#[async_trait::async_trait] +impl ParachainConsensus for WaitForAuraConsensus where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + frame_rpc_system::AccountNonceApi, - RB: Fn(Arc>) -> Result, sc_service::Error> - + 'static, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, - SC: FnOnce( - Arc>, - ParachainBlockImport, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - Arc>, - KeystorePtr, - Duration, - ParaId, - CollatorPair, - OverseerHandle, - Arc>) + Send + Sync>, - Arc, - ) -> Result<(), sc_service::Error>, -{ - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::new(¶chain_config.network); - - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level, - }) - .await?; - - let rpc_client = client.clone(); - let rpc_builder = Box::new(move |_, _| rpc_ext_builder(rpc_client.clone())); - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - 
sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), - para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; - - if validator { - start_consensus( - client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), - )?; - } - - start_network.start_network(); - - Ok((task_manager, client)) -} - -/// Build the import queue for the rococo parachain runtime. -pub fn rococo_parachain_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import, - client, - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, - registry: config.prometheus_registry(), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) -} - -/// Start a rococo parachain node. 
-pub async fn start_rococo_parachain_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - rococo_parachain_build_import_queue, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - backend| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend.clone(), - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; - - let fut = aura::run::< - Block, - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - _, - _, - _, - _, - >(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); - - Ok(()) - }, - hwbench, - ) - .await -} - -/// Build the import queue for the shell runtime. -pub fn shell_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - _: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder, -{ - cumulus_client_consensus_relay_chain::import_queue( - client, - block_import, - |_, _| async { Ok(()) }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - ) - .map_err(Into::into) -} - -/// Start a polkadot-shell parachain node. 
-pub async fn start_shell_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo, -{ - start_shell_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Unresistant, // free-for-all consensus - para_id, - |_| Ok(RpcModule::new(())), - shell_build_import_queue, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - _sync_oracle, - _keystore, - _relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry, - ); - - let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( - cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { - para_id, - proposer_factory, - block_import, - relay_chain_interface: relay_chain_interface.clone(), - create_inherent_data_providers: move |_, (relay_parent, validation_data)| { - let relay_chain_interface = relay_chain_interface.clone(); - async move { - let parachain_inherent = - cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at( - relay_parent, - &relay_chain_interface, - &validation_data, - para_id, - ).await; - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok(parachain_inherent) - } - }, - }, - ); - - let spawner = task_manager.spawn_handle(); - - // Required for free-for-all consensus - #[allow(deprecated)] - old_consensus::start_collator_sync(old_consensus::StartCollatorParams { - para_id, - block_status: client.clone(), - announce_block, - overseer_handle, - spawner, - key: collator_key, - parachain_consensus: free_for_all, - runtime_api: client.clone(), - }); - - Ok(()) - }, - hwbench, - ) - .await -} - -enum BuildOnAccess { - Uninitialized(Option R + Send + Sync>>), - Initialized(R), -} - -impl BuildOnAccess { - fn get_mut(&mut self) -> &mut R { - loop { - match self { - Self::Uninitialized(f) => { - *self = Self::Initialized((f.take().unwrap())()); - }, - Self::Initialized(ref mut r) => return r, - } - } - } -} - -/// Special [`ParachainConsensus`] implementation that waits for the upgrade from -/// shell to a parachain runtime that implements Aura. 
-struct WaitForAuraConsensus { - client: Arc, - aura_consensus: Arc>>>>, - relay_chain_consensus: Arc>>>, - _phantom: PhantomData, -} - -impl Clone for WaitForAuraConsensus { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - aura_consensus: self.aura_consensus.clone(), - relay_chain_consensus: self.relay_chain_consensus.clone(), - _phantom: PhantomData, - } - } -} - -#[async_trait::async_trait] -impl ParachainConsensus for WaitForAuraConsensus -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Codec + Sync, + Client: sp_api::ProvideRuntimeApi + Send + Sync, + Client::Api: AuraApi, + AuraId: Send + Codec + Sync, { async fn produce_candidate( &mut self, parent: &Header, - relay_parent: PHash, - validation_data: &PersistedValidationData, - ) -> Option> { - if self - .client - .runtime_api() - .has_api::>(parent.hash()) - .unwrap_or(false) - { - self.aura_consensus - .lock() - .await - .get_mut() - .produce_candidate(parent, relay_parent, validation_data) - .await - } else { - self.relay_chain_consensus - .lock() - .await - .produce_candidate(parent, relay_parent, validation_data) - .await - } - } -} - -struct Verifier { - client: Arc, - aura_verifier: BuildOnAccess>>, - relay_chain_verifier: Box>, - _phantom: PhantomData, -} - -#[async_trait::async_trait] -impl VerifierT for Verifier -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Sync + Codec, -{ - async fn verify( - &mut self, - block_import: BlockImportParams, - ) -> Result, String> { - if self - .client - .runtime_api() - .has_api::>(*block_import.header.parent_hash()) - .unwrap_or(false) - { - self.aura_verifier.get_mut().verify(block_import).await - } else { - self.relay_chain_verifier.verify(block_import).await - } - } -} - -/// Build the import queue for Aura-based runtimes. 
-pub fn aura_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry_handle: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi::Pair as Pair>::Public>, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - let client2 = client.clone(); - - let aura_verifier = move || { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client2).unwrap(); - - Box::new(cumulus_client_consensus_aura::build_verifier::< - ::Pair, - _, - _, - _, - >(cumulus_client_consensus_aura::BuildVerifierParams { - client: client2.clone(), - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, - telemetry: telemetry_handle, - })) as Box<_> - }; - - let relay_chain_verifier = - Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>; - - let verifier = Verifier { - client, - relay_chain_verifier, - aura_verifier: BuildOnAccess::Uninitialized(Some(Box::new(aura_verifier))), - _phantom: PhantomData, - }; - - let registry = config.prometheus_registry(); - let spawner = task_manager.spawn_essential_handle(); - - Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) -} - -/// Start an aura powered parachain node. Some system chains use this. 
-pub async fn start_generic_aura_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - _backend| { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = BasicAuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client, - relay_client: relay_chain_interface, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - slot_duration, - relay_chain_slot_duration, - proposer, - collator_service, - // Very limited proposal time. - authoring_duration: Duration::from_millis(500), - collation_request_receiver: None, - }; - - let fut = - basic_aura::run::::Pair, _, _, _, _, _, _, _>(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); - - Ok(()) - }, - hwbench, - ) - .await -} - -/// Uses the lookahead collator to support async backing. -/// -/// Start an aura powered parachain node. Some system chains use this. 
-pub async fn start_generic_aura_lookahead_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - backend| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; - - let fut = - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); - - Ok(()) - }, - hwbench, - ) - .await -} - -/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub -/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and -/// needs to sync and upgrade before it can run `AuraApi` functions. 
-pub async fn start_asset_hub_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - _backend| { - let relay_chain_interface2 = relay_chain_interface.clone(); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let spawner = task_manager.spawn_handle(); - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - spawner, - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collation_future = Box::pin(async move { - // Start collating with the `shell` runtime while waiting for an upgrade to an Aura - // compatible runtime. - let mut request_stream = cumulus_client_collator::relay_chain_driven::init( - collator_key.clone(), - para_id, - overseer_handle.clone(), - ) - .await; - while let Some(request) = request_stream.next().await { - let pvd = request.persisted_validation_data().clone(); - let last_head_hash = - match ::Header::decode(&mut &pvd.parent_head.0[..]) { - Ok(header) => header.hash(), - Err(e) => { - log::error!("Could not decode the head data: {e}"); - request.complete(None); - continue - }, - }; - - // Check if we have upgraded to an Aura compatible runtime and transition if - // necessary. - if client - .runtime_api() - .has_api::>(last_head_hash) - .unwrap_or(false) - { - // Respond to this request before transitioning to Aura. - request.complete(None); - break - } - } - - // Move to Aura consensus. - let slot_duration = match cumulus_client_consensus_aura::slot_duration(&*client) { - Ok(d) => d, - Err(e) => { - log::error!("Could not get Aura slot duration: {e}"); - return - }, - }; - - let proposer = Proposer::new(proposer_factory); - - let params = BasicAuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client, - relay_client: relay_chain_interface2, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - slot_duration, - relay_chain_slot_duration, - proposer, - collator_service, - // Very limited proposal time. 
- authoring_duration: Duration::from_millis(500), - collation_request_receiver: Some(request_stream), - }; - - basic_aura::run::::Pair, _, _, _, _, _, _, _>(params) - .await - }); + relay_parent: PHash, + validation_data: &PersistedValidationData, + ) -> Option> { + if self + .client + .runtime_api() + .has_api::>(parent.hash()) + .unwrap_or(false) + { + self.aura_consensus + .lock() + .await + .get_mut() + .produce_candidate(parent, relay_parent, validation_data) + .await + } else { + self.relay_chain_consensus + .lock() + .await + .produce_candidate(parent, relay_parent, validation_data) + .await + } + } +} - let spawner = task_manager.spawn_essential_handle(); - spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); +struct Verifier { + client: Arc, + aura_verifier: BuildOnAccess>>, + relay_chain_verifier: Box>, + _phantom: PhantomData, +} - Ok(()) - }, - hwbench, - ) - .await +#[async_trait::async_trait] +impl VerifierT for Verifier +where + Client: sp_api::ProvideRuntimeApi + Send + Sync, + Client::Api: AuraApi, + AuraId: Send + Sync + Codec, +{ + async fn verify( + &mut self, + block_import: BlockImportParams, + ) -> Result, String> { + if self + .client + .runtime_api() + .has_api::>(*block_import.header.parent_hash()) + .unwrap_or(false) + { + self.aura_verifier.get_mut().verify(block_import).await + } else { + self.relay_chain_verifier.verify(block_import).await + } + } } -/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub -/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and -/// needs to sync and upgrade before it can run `AuraApi` functions. -/// -/// Uses the lookahead collator to support async backing. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -pub async fn start_asset_hub_lookahead_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +/// Build the import queue for parachain runtimes that started with relay chain consensus and +/// switched to aura. 
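+///
+/// Verification is dispatched per block: blocks whose parent runtime does not yet expose
+/// `AuraApi` pass through a permissive relay-chain verifier, while later blocks go through a
+/// lazily built Aura verifier that reads the slot duration at the parent hash. A rough wiring
+/// sketch, assuming the usual `new_partial` outputs as in the node starters below:
+///
+/// ```ignore
+/// let import_queue = build_relay_to_aura_import_queue::<_, AuraId>(
+///     client.clone(),
+///     block_import,
+///     &parachain_config,
+///     telemetry.as_ref().map(|t| t.handle()),
+///     &task_manager,
+/// )?;
+/// ```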
+pub fn build_relay_to_aura_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry_handle: Option, + task_manager: &TaskManager, +) -> Result, sc_service::Error> where RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue @@ -1646,162 +636,74 @@ where + sp_api::ApiExt + sp_offchain::OffchainWorkerApi + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, + + sp_consensus_aura::AuraApi::Pair as Pair>::Public>, <::Pair as Pair>::Signature: TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - backend| { - let relay_chain_interface2 = relay_chain_interface.clone(); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let spawner = task_manager.spawn_handle(); - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - spawner, - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collation_future = Box::pin(async move { - // Start collating with the `shell` runtime while waiting for an upgrade to an Aura - // compatible runtime. - let mut request_stream = cumulus_client_collator::relay_chain_driven::init( - collator_key.clone(), - para_id, - overseer_handle.clone(), - ) - .await; - while let Some(request) = request_stream.next().await { - let pvd = request.persisted_validation_data().clone(); - let last_head_hash = - match ::Header::decode(&mut &pvd.parent_head.0[..]) { - Ok(header) => header.hash(), - Err(e) => { - log::error!("Could not decode the head data: {e}"); - request.complete(None); - continue - }, - }; + let verifier_client = client.clone(); - // Check if we have upgraded to an Aura compatible runtime and transition if - // necessary. - if client - .runtime_api() - .has_api::>(last_head_hash) - .unwrap_or(false) - { - // Respond to this request before transitioning to Aura. - request.complete(None); - break - } + let aura_verifier = move || { + Box::new(cumulus_client_consensus_aura::build_verifier::< + ::Pair, + _, + _, + _, + >(cumulus_client_consensus_aura::BuildVerifierParams { + client: verifier_client.clone(), + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = verifier_client.clone(); + async move { + let slot_duration = cumulus_client_consensus_aura::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp)) } + }, + telemetry: telemetry_handle, + })) as Box<_> + }; - // Move to Aura consensus. 
- let proposer = Proposer::new(proposer_factory); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface2, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: true, /* we need to always re-initialize for asset-hub moving - * to aura */ - }; + let relay_chain_verifier = + Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>; - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params) - .await - }); + let verifier = Verifier { + client, + relay_chain_verifier, + aura_verifier: BuildOnAccess::Uninitialized(Some(Box::new(aura_verifier))), + _phantom: PhantomData, + }; - let spawner = task_manager.spawn_essential_handle(); - spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); + let registry = config.prometheus_registry(); + let spawner = task_manager.spawn_essential_handle(); - Ok(()) - }, - hwbench, - ) - .await + Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) } -/// Start an aura powered parachain node which uses the lookahead collator to support async backing. -/// This node is basic in the sense that its runtime api doesn't include common contents such as -/// transaction payment. Used for aura glutton. -pub async fn start_basic_lookahead_node( +/// Start an aura powered parachain node. Some system chains use this. +pub async fn start_generic_aura_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, para_id: ParaId, hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - start_basic_lookahead_node_impl::( +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( parachain_config, polkadot_config, collator_options, CollatorSybilResistance::Resistant, // Aura para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, + build_parachain_rpc_extensions::, + build_relay_to_aura_import_queue::<_, AuraId>, |client, block_import, prometheus_registry, @@ -1816,7 +718,9 @@ where collator_key, overseer_handle, announce_block, - backend| { + _backend| { + let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), client.clone(), @@ -1833,29 +737,27 @@ where client.clone(), ); - let params = AuraParams { + let params = BasicAuraParams { create_inherent_data_providers: move |_, ()| async move { Ok(()) }, block_import, - para_client: client.clone(), - para_backend: backend.clone(), + 
para_client: client, relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, sync_oracle, keystore, collator_key, para_id, overseer_handle, + slot_duration, relay_chain_slot_duration, proposer, collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, + // Very limited proposal time. + authoring_duration: Duration::from_millis(500), + collation_request_receiver: None, }; let fut = - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); + basic_aura::run::::Pair, _, _, _, _, _, _, _>(params); task_manager.spawn_essential_handle().spawn("aura", None, fut); Ok(()) @@ -1865,16 +767,38 @@ where .await } -#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_contracts_rococo_node_impl( +/// Uses the lookahead collator to support async backing. +/// +/// Start an aura powered parachain node. Some system chains use this. +pub async fn start_generic_aura_lookahead_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( + parachain_config, + polkadot_config, + collator_options, + CollatorSybilResistance::Resistant, // Aura + para_id, + build_parachain_rpc_extensions::, + build_relay_to_aura_import_queue::<_, AuraId>, + start_lookahead_aura_consensus, + hwbench, + ) + .await +} + +/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub +/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and +/// needs to sync and upgrade before it can run `AuraApi` functions. 
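+///
+/// A minimal invocation sketch; the concrete `RuntimeApi` and `AuraId` types and all of the
+/// configuration values are placeholders supplied by the caller:
+///
+/// ```ignore
+/// let (task_manager, client) = start_asset_hub_node::<RuntimeApi, AuraId>(
+///     parachain_config,
+///     polkadot_config,
+///     collator_options,
+///     para_id,
+///     hwbench,
+/// )
+/// .await?;
+/// ```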
+pub async fn start_asset_hub_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, - sybil_resistance_level: CollatorSybilResistance, para_id: ParaId, - _rpc_ext_builder: RB, - build_import_queue: BIQ, - start_consensus: SC, hwbench: Option, ) -> sc_service::error::Result<(TaskManager, Arc>)> where @@ -1886,228 +810,169 @@ where + sp_offchain::OffchainWorkerApi + sp_block_builder::BlockBuilder + cumulus_primitives_core::CollectCollationInfo + + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - RB: Fn(Arc>) -> Result, sc_service::Error>, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, - SC: FnOnce( - Arc>, - ParachainBlockImport, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - Arc>, - KeystorePtr, - Duration, - ParaId, - CollatorPair, - OverseerHandle, - Arc>) + Send + Sync>, - Arc, - ) -> Result<(), sc_service::Error>, + + frame_rpc_system::AccountNonceApi, + <::Pair as Pair>::Signature: + TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, { - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - let mut task_manager = params.task_manager; - - let (relay_chain_interface, collator_key) = build_relay_chain_interface( + start_node_impl::( + parachain_config, polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::new(¶chain_config.network); - - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level, - }) - .await?; - - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); - - Box::new(move |deny_unsafe, _| { - let deps = crate::rpc::FullDeps { - client: client.clone(), - pool: transaction_pool.clone(), - deny_unsafe, - }; - - crate::rpc::create_contracts_rococo(deps).map_err(Into::into) - }) - }; - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - 
if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), + collator_options, + CollatorSybilResistance::Resistant, // Aura para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; + build_parachain_rpc_extensions::, + build_relay_to_aura_import_queue::<_, AuraId>, + |client, + block_import, + prometheus_registry, + telemetry, + task_manager, + relay_chain_interface, + transaction_pool, + sync_oracle, + keystore, + relay_chain_slot_duration, + para_id, + collator_key, + overseer_handle, + announce_block, + _backend| { + let relay_chain_interface2 = relay_chain_interface.clone(); - if validator { - start_consensus( - client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), - )?; - } + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); - start_network.start_network(); + let spawner = task_manager.spawn_handle(); - Ok((task_manager, client)) -} + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + spawner, + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); -#[allow(clippy::type_complexity)] -pub fn contracts_rococo_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; + let collation_future = Box::pin(async move { + // Start collating with the `shell` runtime while waiting for an upgrade to an Aura + // compatible runtime. 
+ let mut request_stream = cumulus_client_collator::relay_chain_driven::init( + collator_key.clone(), + para_id, + overseer_handle.clone(), + ) + .await; + while let Some(request) = request_stream.next().await { + let pvd = request.persisted_validation_data().clone(); + let last_head_hash = + match ::Header::decode(&mut &pvd.parent_head.0[..]) { + Ok(header) => header.hash(), + Err(e) => { + log::error!("Could not decode the head data: {e}"); + request.complete(None); + continue + }, + }; - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import, - client, - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + // Check if we have upgraded to an Aura compatible runtime and transition if + // necessary. + if client + .runtime_api() + .has_api::>(last_head_hash) + .unwrap_or(false) + { + // Respond to this request before transitioning to Aura. + request.complete(None); + break + } + } - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, + // Move to Aura consensus. + let slot_duration = match cumulus_client_consensus_aura::slot_duration(&*client) { + Ok(d) => d, + Err(e) => { + log::error!("Could not get Aura slot duration: {e}"); + return + }, + }; + + let proposer = Proposer::new(proposer_factory); + + let params = BasicAuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client, + relay_client: relay_chain_interface2, + sync_oracle, + keystore, + collator_key, + para_id, + overseer_handle, slot_duration, - ); + relay_chain_slot_duration, + proposer, + collator_service, + // Very limited proposal time. + authoring_duration: Duration::from_millis(500), + collation_request_receiver: Some(request_stream), + }; - Ok((slot, timestamp)) + basic_aura::run::::Pair, _, _, _, _, _, _, _>(params) + .await + }); + + let spawner = task_manager.spawn_essential_handle(); + spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); + + Ok(()) }, - registry: config.prometheus_registry(), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) + hwbench, + ) + .await } -/// Start a parachain node. -pub async fn start_contracts_rococo_node( +/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub +/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and +/// needs to sync and upgrade before it can run `AuraApi` functions. +/// +/// Uses the lookahead collator to support async backing. 
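+///
+/// The transition works as in [`start_asset_hub_node`]: serve relay-chain-driven collation
+/// requests under the `shell` runtime, probe each decoded parent head with `AuraApi::has_api`,
+/// and once the runtime advertises Aura, stop and re-initialize the lookahead collator
+/// (`reinitialize: true`). Condensed from the collation future below:
+///
+/// ```ignore
+/// if client.runtime_api().has_api::<dyn AuraApi<Block, AuraId>>(last_head_hash).unwrap_or(false) {
+///     // Respond to the pending request before switching over to Aura.
+///     request.complete(None);
+///     break
+/// }
+/// ```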
+#[sc_tracing::logging::prefix_logs_with("Parachain")] +pub async fn start_asset_hub_lookahead_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, para_id: ParaId, hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_contracts_rococo_node_impl::( +) -> sc_service::error::Result<(TaskManager, Arc>)> +where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_api::Metadata + + sp_session::SessionKeys + + sp_api::ApiExt + + sp_offchain::OffchainWorkerApi + + sp_block_builder::BlockBuilder + + cumulus_primitives_core::CollectCollationInfo + + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + frame_rpc_system::AccountNonceApi + + cumulus_primitives_aura::AuraUnincludedSegmentApi, + <::Pair as Pair>::Signature: + TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, +{ + start_node_impl::( parachain_config, polkadot_config, collator_options, CollatorSybilResistance::Resistant, // Aura para_id, - |_| Ok(RpcModule::new(())), - contracts_rococo_build_import_queue, + build_parachain_rpc_extensions::, + build_relay_to_aura_import_queue::<_, AuraId>, |client, block_import, prometheus_registry, @@ -2123,14 +988,7 @@ pub async fn start_contracts_rococo_node( overseer_handle, announce_block, backend| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); + let relay_chain_interface2 = relay_chain_interface.clone(); let collator_service = CollatorService::new( client.clone(), @@ -2139,42 +997,81 @@ pub async fn start_contracts_rococo_node( client.clone(), ); - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend.clone(), - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - // Very limited proposal time. - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; + let spawner = task_manager.spawn_handle(); - let fut = aura::run::< - Block, - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - _, - _, - _, - _, - >(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + spawner, + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let collation_future = Box::pin(async move { + // Start collating with the `shell` runtime while waiting for an upgrade to an Aura + // compatible runtime. 
+ let mut request_stream = cumulus_client_collator::relay_chain_driven::init( + collator_key.clone(), + para_id, + overseer_handle.clone(), + ) + .await; + while let Some(request) = request_stream.next().await { + let pvd = request.persisted_validation_data().clone(); + let last_head_hash = + match ::Header::decode(&mut &pvd.parent_head.0[..]) { + Ok(header) => header.hash(), + Err(e) => { + log::error!("Could not decode the head data: {e}"); + request.complete(None); + continue + }, + }; + + // Check if we have upgraded to an Aura compatible runtime and transition if + // necessary. + if client + .runtime_api() + .has_api::>(last_head_hash) + .unwrap_or(false) + { + // Respond to this request before transitioning to Aura. + request.complete(None); + break + } + } + + // Move to Aura consensus. + let proposer = Proposer::new(proposer_factory); + + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend, + relay_client: relay_chain_interface2, + code_hash_provider: move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, + sync_oracle, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(1500), + reinitialize: true, /* we need to always re-initialize for asset-hub moving + * to aura */ + }; + + aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params) + .await + }); + + let spawner = task_manager.spawn_essential_handle(); + spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); Ok(()) }, @@ -2183,6 +1080,184 @@ pub async fn start_contracts_rococo_node( .await } +/// Start relay-chain consensus that is free for all. Everyone can submit a block, the relay-chain +/// decides what is backed and included. 
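+///
+/// No keystore, sync oracle or slot duration is needed here: authoring is gated only by the
+/// relay chain, and the sole inherent is the parachain inherent built per relay parent. A
+/// condensed sketch of the provider wired below (error conversion elided):
+///
+/// ```ignore
+/// let parachain_inherent = ParachainInherentDataProvider::create_at(
+///     relay_parent,
+///     &relay_chain_interface,
+///     &validation_data,
+///     para_id,
+/// )
+/// .await
+/// .ok_or("Failed to create parachain inherent")?;
+/// ```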
+fn start_relay_chain_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + _sync_oracle: Arc>, + _keystore: KeystorePtr, + _relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + _backend: Arc, +) -> Result<(), sc_service::Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry, + ); + + let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( + cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { + para_id, + proposer_factory, + block_import, + relay_chain_interface: relay_chain_interface.clone(), + create_inherent_data_providers: move |_, (relay_parent, validation_data)| { + let relay_chain_interface = relay_chain_interface.clone(); + async move { + let parachain_inherent = + cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at( + relay_parent, + &relay_chain_interface, + &validation_data, + para_id, + ).await; + let parachain_inherent = parachain_inherent.ok_or_else(|| { + Box::::from( + "Failed to create parachain inherent", + ) + })?; + Ok(parachain_inherent) + } + }, + }, + ); + + let spawner = task_manager.spawn_handle(); + + // Required for free-for-all consensus + #[allow(deprecated)] + old_consensus::start_collator_sync(old_consensus::StartCollatorParams { + para_id, + block_status: client.clone(), + announce_block, + overseer_handle, + spawner, + key: collator_key, + parachain_consensus: free_for_all, + runtime_api: client.clone(), + }); + + Ok(()) +} + +/// Start consensus using the lookahead aura collator. 
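+///
+/// Builds the proposer and collator service, then spawns `aura::run` as an essential task. The
+/// parameters fixed below are a 1500 ms authoring duration, `reinitialize: false`, and a
+/// `code_hash_provider` that hashes the on-chain validation code, roughly:
+///
+/// ```ignore
+/// code_hash_provider: move |block_hash| {
+///     client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
+/// },
+/// ```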
+fn start_lookahead_aura_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + sync_oracle: Arc>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, +) -> Result<(), sc_service::Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); + + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend, + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, + sync_oracle, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer: Proposer::new(proposer_factory), + collator_service, + authoring_duration: Duration::from_millis(1500), + reinitialize: false, + }; + + let fut = aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); + task_manager.spawn_essential_handle().spawn("aura", None, fut); + + Ok(()) +} + +/// Start an aura powered parachain node which uses the lookahead collator to support async backing. +/// This node is basic in the sense that its runtime api doesn't include common contents such as +/// transaction payment. Used for aura glutton. +pub async fn start_basic_lookahead_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( + parachain_config, + polkadot_config, + collator_options, + CollatorSybilResistance::Resistant, // Aura + para_id, + |_, _, _, _| Ok(RpcModule::new(())), + build_relay_to_aura_import_queue::<_, AuraId>, + start_lookahead_aura_consensus, + hwbench, + ) + .await +} + +/// Start a parachain node for Rococo Contracts. +pub async fn start_contracts_rococo_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( + parachain_config, + polkadot_config, + collator_options, + CollatorSybilResistance::Resistant, // Aura + para_id, + build_contracts_rpc_extensions, + build_aura_import_queue, + start_lookahead_aura_consensus, + hwbench, + ) + .await +} + /// Checks that the hardware meets the requirements and print a warning otherwise. 
fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { // Polkadot para-chains should generally use these requirements to ensure that the relay-chain diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs index 6da6235e585a343887f87931e375b21bec48c20d..8ebc58ea450d4aea023f2a3af218bbd9eb3546e9 100644 --- a/cumulus/primitives/proof-size-hostfunction/src/lib.rs +++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs @@ -18,6 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] use sp_externalities::ExternalitiesExt; use sp_runtime_interface::runtime_interface; @@ -35,7 +36,8 @@ pub const PROOF_RECORDING_DISABLED: u64 = u64::MAX; pub trait StorageProofSize { /// Returns the current storage proof size. fn storage_proof_size(&mut self) -> u64 { - self.extension::().map_or(u64::MAX, |e| e.storage_proof_size()) + self.extension::() + .map_or(PROOF_RECORDING_DISABLED, |e| e.storage_proof_size()) } } diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4835fb5192b88165c1e7912862492086ddd0853e --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "cumulus-primitives-storage-weight-reclaim" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +description = "Utilities to reclaim storage weight." +license = "Apache-2.0" + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +log = { workspace = true } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } + +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } + +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } + +cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } +docify = "0.2.7" + +[dev-dependencies] +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +cumulus-test-runtime = { path = "../../test/runtime" } + +[features] +default = ["std"] +std = [ + "codec/std", + "cumulus-primitives-core/std", + "cumulus-primitives-proof-size-hostfunction/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-trie/std", +] diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..5dddc92e395574dbb36538f3799c0d21b22a3939 --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -0,0 +1,663 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Mechanism to reclaim PoV proof size weight after an extrinsic has been applied.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use codec::{Decode, Encode};
+use cumulus_primitives_core::Weight;
+use cumulus_primitives_proof_size_hostfunction::{
+	storage_proof_size::storage_proof_size, PROOF_RECORDING_DISABLED,
+};
+use frame_support::{
+	dispatch::{DispatchInfo, PostDispatchInfo},
+	weights::WeightMeter,
+};
+use frame_system::Config;
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension},
+	transaction_validity::TransactionValidityError,
+	DispatchResult,
+};
+use sp_std::marker::PhantomData;
+
+const LOG_TARGET: &'static str = "runtime::storage_reclaim";
+
+/// `StorageWeightReclaimer` is a mechanism for manually reclaiming storage weight.
+///
+/// It internally keeps track of the proof size and storage weight at initialization time. At
+/// reclaim it computes the real consumed storage weight and refunds excess weight.
+///
+/// # Example
+#[doc = docify::embed!("src/lib.rs", simple_reclaimer_example)]
+pub struct StorageWeightReclaimer {
+	previous_remaining_proof_size: u64,
+	previous_reported_proof_size: Option<u64>,
+}
+
+impl StorageWeightReclaimer {
+	/// Creates a new `StorageWeightReclaimer` instance and initializes it with the storage
+	/// size provided by `weight_meter` and reported proof size from the node.
+	#[must_use = "Must call `reclaim_with_meter` to reclaim the weight"]
+	pub fn new(weight_meter: &WeightMeter) -> StorageWeightReclaimer {
+		let previous_remaining_proof_size = weight_meter.remaining().proof_size();
+		let previous_reported_proof_size = get_proof_size();
+		Self { previous_remaining_proof_size, previous_reported_proof_size }
+	}
+
+	/// Check the consumed storage weight and calculate the consumed excess weight.
+	fn reclaim(&mut self, remaining_weight: Weight) -> Option<Weight> {
+		let current_remaining_weight = remaining_weight.proof_size();
+		let current_storage_proof_size = get_proof_size()?;
+		let previous_storage_proof_size = self.previous_reported_proof_size?;
+		let used_weight =
+			self.previous_remaining_proof_size.saturating_sub(current_remaining_weight);
+		let reported_used_size =
+			current_storage_proof_size.saturating_sub(previous_storage_proof_size);
+		let reclaimable = used_weight.saturating_sub(reported_used_size);
+		log::trace!(
+			target: LOG_TARGET,
+			"Found reclaimable storage weight. benchmarked: {used_weight}, consumed: {reported_used_size}"
+		);
+
+		self.previous_remaining_proof_size = current_remaining_weight.saturating_add(reclaimable);
+		self.previous_reported_proof_size = Some(current_storage_proof_size);
+		Some(Weight::from_parts(0, reclaimable))
+	}
+
+	/// Check the consumed storage weight and add the reclaimed
+	/// weight budget back to `weight_meter`.
+	pub fn reclaim_with_meter(&mut self, weight_meter: &mut WeightMeter) -> Option<Weight> {
+		let reclaimed = self.reclaim(weight_meter.remaining())?;
+		weight_meter.reclaim_proof_size(reclaimed.proof_size());
+		Some(reclaimed)
+	}
+}
+
+/// Returns the current storage proof size from the host side.
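+/// The value is obtained via the `storage_proof_size` host function.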
+///
+/// Returns `None` if proof recording is disabled on the host.
+pub fn get_proof_size() -> Option<u64> {
+	let proof_size = storage_proof_size();
+	(proof_size != PROOF_RECORDING_DISABLED).then_some(proof_size)
+}
+
+/// Storage weight reclaim mechanism.
+///
+/// This extension checks the size of the node-side storage proof
+/// before and after executing a given extrinsic. The difference between
+/// benchmarked and spent weight can be reclaimed.
+#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct StorageWeightReclaim<T: Config>(PhantomData<T>);
+
+impl<T: Config> StorageWeightReclaim<T> {
+	/// Create a new `StorageWeightReclaim` instance.
+	pub fn new() -> Self {
+		Self(Default::default())
+	}
+}
+
+impl<T: Config> core::fmt::Debug for StorageWeightReclaim<T> {
+	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+		let _ = write!(f, "StorageWeightReclaim");
+		Ok(())
+	}
+}
+
+impl<T: Config + Send + Sync> SignedExtension for StorageWeightReclaim<T>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	const IDENTIFIER: &'static str = "StorageWeightReclaim";
+
+	type AccountId = T::AccountId;
+	type Call = T::RuntimeCall;
+	type AdditionalSigned = ();
+	type Pre = Option<u64>;
+
+	fn additional_signed(
+		&self,
+	) -> Result<Self::AdditionalSigned, TransactionValidityError>
+	{
+		Ok(())
+	}
+
+	fn pre_dispatch(
+		self,
+		_who: &Self::AccountId,
+		_call: &Self::Call,
+		_info: &sp_runtime::traits::DispatchInfoOf<Self::Call>,
+		_len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		Ok(get_proof_size())
+	}
+
+	fn post_dispatch(
+		pre: Option<Self::Pre>,
+		info: &DispatchInfoOf<Self::Call>,
+		post_info: &PostDispatchInfoOf<Self::Call>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		let Some(Some(pre_dispatch_proof_size)) = pre else {
+			return Ok(());
+		};
+
+		let Some(post_dispatch_proof_size) = get_proof_size() else {
+			log::debug!(
+				target: LOG_TARGET,
+				"Proof recording enabled during pre-dispatch, now disabled. This should not happen."
+			);
+			return Ok(())
+		};
+		let benchmarked_weight = info.weight.proof_size();
+		let consumed_weight = post_dispatch_proof_size.saturating_sub(pre_dispatch_proof_size);
+
+		// Unspent weight according to the `actual_weight` from `PostDispatchInfo`
+		// This unspent weight will be refunded by the `CheckWeight` extension, so we need to
+		// account for that.
+		let unspent = post_info.calc_unspent(info).proof_size();
+		let storage_size_diff =
+			benchmarked_weight.saturating_sub(unspent).abs_diff(consumed_weight as u64);
+
+		// This value will be reclaimed by [`frame_system::CheckWeight`], so we need to calculate
+		// that in.
+		frame_system::BlockWeight::<T>::mutate(|current| {
+			if consumed_weight > benchmarked_weight {
+				log::error!(
+					target: LOG_TARGET,
+					"Benchmarked storage weight smaller than consumed storage weight. benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}"
+				);
+				current.accrue(Weight::from_parts(0, storage_size_diff), info.class)
+			} else {
+				log::trace!(
+					target: LOG_TARGET,
+					"Reclaiming storage weight. benchmarked: {benchmarked_weight}, consumed: {consumed_weight} unspent: {unspent}"
+				);
+				current.reduce(Weight::from_parts(0, storage_size_diff), info.class)
+			}
+		});
+		Ok(())
+	}
+}
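+
+// Worked example of the `post_dispatch` arithmetic above (numbers from the `basic_refund`
+// test below): with a benchmarked proof size of 500, no `actual_weight` refund (unspent = 0)
+// and a measured proof delta of 100, `storage_size_diff` is 400 and the block's stored proof
+// size is *reduced* by 400 (1000 -> 600). Had the measured delta exceeded the benchmark, the
+// same difference would have been *accrued* instead.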
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use frame_support::{
+		assert_ok,
+		dispatch::DispatchClass,
+		weights::{Weight, WeightMeter},
+	};
+	use frame_system::{BlockWeight, CheckWeight};
+	use sp_runtime::{AccountId32, BuildStorage};
+	use sp_std::marker::PhantomData;
+	use sp_trie::proof_size_extension::ProofSizeExt;
+
+	type Test = cumulus_test_runtime::Runtime;
+	const CALL: &<Test as Config>::RuntimeCall =
+		&cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages {
+			pages: 0u64,
+		});
+	const ALICE: AccountId32 = AccountId32::new([1u8; 32]);
+	const LEN: usize = 0;
+
+	pub fn new_test_ext() -> sp_io::TestExternalities {
+		let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default()
+			.build_storage()
+			.unwrap()
+			.into();
+		ext
+	}
+
+	struct TestRecorder {
+		return_values: Box<[usize]>,
+		counter: std::sync::atomic::AtomicUsize,
+	}
+
+	impl TestRecorder {
+		fn new(values: &[usize]) -> Self {
+			TestRecorder { return_values: values.into(), counter: Default::default() }
+		}
+	}
+
+	impl sp_trie::ProofSizeProvider for TestRecorder {
+		fn estimate_encoded_size(&self) -> usize {
+			let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
+			self.return_values[counter]
+		}
+	}
+
+	fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities {
+		let mut test_ext = new_test_ext();
+		let test_recorder = TestRecorder::new(proof_values);
+		test_ext.register_extension(ProofSizeExt::new(test_recorder));
+		test_ext
+	}
+
+	fn set_current_storage_weight(new_weight: u64) {
+		BlockWeight::<Test>::mutate(|current_weight| {
+			current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal);
+		});
+	}
+
+	#[test]
+	fn basic_refund() {
+		// The real cost will be 100 bytes of storage size
+		let mut test_ext = setup_test_externalities(&[0, 100]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+
+			// Benchmarked storage weight: 500
+			let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() };
+			let post_info = PostDispatchInfo::default();
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(0));
+
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			// We expect a refund of 400
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 600);
+		})
+	}
+
+	#[test]
+	fn does_nothing_without_extension() {
+		let mut test_ext = new_test_ext();
+
+		// Proof size extension not registered
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+
+			// Benchmarked storage weight: 500
+			let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() };
+			let post_info = PostDispatchInfo::default();
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, None);
+
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 1000);
+		})
+	}
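+
+	// The opposite case of `basic_refund`: when the measured proof delta (300 - 100 = 200)
+	// exceeds the benchmarked 100, the difference is added to the block weight instead
+	// (1000 -> 1100 below).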
+	#[test]
+	fn negative_refund_is_added_to_weight() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+			// Benchmarked storage weight: 100
+			let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() };
+			let post_info = PostDispatchInfo::default();
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			// We expect no refund
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 1100);
+		})
+	}
+
+	#[test]
+	fn test_zero_proof_size() {
+		let mut test_ext = setup_test_externalities(&[0, 0]);
+
+		test_ext.execute_with(|| {
+			let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() };
+			let post_info = PostDispatchInfo::default();
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(0));
+
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 0);
+		});
+	}
+
+	#[test]
+	fn test_larger_pre_dispatch_proof_size() {
+		let mut test_ext = setup_test_externalities(&[300, 100]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1300);
+
+			let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() };
+			let post_info = PostDispatchInfo::default();
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(300));
+
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 800);
+		});
+	}
+
+	#[test]
+	fn test_incorporates_check_weight_unspent_weight() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+
+			// Benchmarked storage weight: 300
+			let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() };
+
+			// Actual weight is 50
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 250)),
+				pays_fee: Default::default(),
+			};
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`;
+			// we always need to call `post_dispatch` to verify that they interoperate correctly.
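+			// Expected numbers: `CheckWeight` refunds the unspent 50 bytes (1000 -> 950)
+			// and the reclaim extension removes the remaining overestimate of 50 (950 -> 900).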
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 900);
+		})
+	}
+
+	#[test]
+	fn test_incorporates_check_weight_unspent_weight_on_negative() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+			// Benchmarked storage weight: 50
+			let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() };
+
+			// Actual weight is 25
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 25)),
+				pays_fee: Default::default(),
+			};
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`;
+			// we always need to call `post_dispatch` to verify that they interoperate correctly.
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 1150);
+		})
+	}
+
+	#[test]
+	fn test_incorporates_check_weight_unspent_weight_reverse_order() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+
+			// Benchmarked storage weight: 300
+			let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() };
+
+			// Actual weight is 50
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 250)),
+				pays_fee: Default::default(),
+			};
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+			// `CheckWeight` gets called after `StorageWeightReclaim` this time.
+			// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`;
+			// we always need to call `post_dispatch` to verify that they interoperate correctly.
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 900);
+		})
+	}
+
+	#[test]
+	fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+			// Benchmarked storage weight: 50
+			let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() };
+
+			// Actual weight is 25
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 25)),
+				pays_fee: Default::default(),
+			};
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+			// `CheckWeight` gets called after `StorageWeightReclaim` this time.
+			// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`;
+			// we always need to call `post_dispatch` to verify that they interoperate correctly.
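+			// Expected numbers: the reclaim extension accrues the 175-byte underestimate
+			// (1000 -> 1175), then `CheckWeight` refunds the unspent 25 (1175 -> 1150).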
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 1150);
+		})
+	}
+
+	#[test]
+	fn storage_size_reported_correctly() {
+		let mut test_ext = setup_test_externalities(&[1000]);
+		test_ext.execute_with(|| {
+			assert_eq!(get_proof_size(), Some(1000));
+		});
+
+		let mut test_ext = new_test_ext();
+
+		let test_recorder = TestRecorder::new(&[0]);
+
+		test_ext.register_extension(ProofSizeExt::new(test_recorder));
+
+		test_ext.execute_with(|| {
+			assert_eq!(get_proof_size(), Some(0));
+		});
+	}
+
+	#[test]
+	fn storage_size_disabled_reported_correctly() {
+		let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]);
+
+		test_ext.execute_with(|| {
+			assert_eq!(get_proof_size(), None);
+		});
+	}
+
+	#[test]
+	fn test_reclaim_helper() {
+		let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]);
+
+		test_ext.execute_with(|| {
+			let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000));
+			let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+			remaining_weight_meter.consume(Weight::from_parts(0, 500));
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 200)));
+
+			remaining_weight_meter.consume(Weight::from_parts(0, 800));
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 300)));
+			assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200));
+		});
+	}
+
+	#[test]
+	fn test_reclaim_helper_does_not_reclaim_negative() {
+		// Benchmarked weight does not change at all
+		let mut test_ext = setup_test_externalities(&[1000, 1300]);
+
+		test_ext.execute_with(|| {
+			let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000));
+			let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 0)));
+			assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000));
+		});
+
+		// Benchmarked weight increases less than storage proof consumes
+		let mut test_ext = setup_test_externalities(&[1000, 1300]);
+
+		test_ext.execute_with(|| {
+			let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000));
+			let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+			remaining_weight_meter.consume(Weight::from_parts(0, 0));
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 0)));
+		});
+	}
+
+	/// Just here for doc purposes
+	fn get_benched_weight() -> Weight {
+		Weight::from_parts(0, 5)
+	}
+
+	/// Just here for doc purposes
+	fn do_work() {}
+
+	#[docify::export_content(simple_reclaimer_example)]
+	fn reclaim_with_weight_meter() {
+		let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10));
+
+		let benched_weight = get_benched_weight();
+
+		// It is important to instantiate the `StorageWeightReclaimer` before we consume the weight
+		// for a piece of work from the weight meter.
+		let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+
+		if remaining_weight_meter.try_consume(benched_weight).is_ok() {
+			// Perform some work that has `benched_weight` storage weight.
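+			// (In `test_reclaim_helper_works_with_meter` below, the host reports proof
+			// sizes 10 and 12 around this work, i.e. 2 bytes actually consumed against
+			// the 5 bytes budgeted by `get_benched_weight`.)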
+			do_work();
+
+			// Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed.
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+			// We reclaimed 3 bytes of storage size!
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 3)));
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 10);
+			assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8));
+		}
+	}
+
+	#[test]
+	fn test_reclaim_helper_works_with_meter() {
+		// The node will report 12 - 10 = 2 consumed storage size between the calls.
+		let mut test_ext = setup_test_externalities(&[10, 12]);
+
+		test_ext.execute_with(|| {
+			// Initial storage size is 10.
+			set_current_storage_weight(10);
+			reclaim_with_weight_meter();
+		});
+	}
+}
diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml
index 45c0e6670942e6b9b1ba0e6b1e80a1f0b357ff50..1e2c300b9ba257d2c8fb998689ae45847099dd63 100644
--- a/cumulus/primitives/utility/Cargo.toml
+++ b/cumulus/primitives/utility/Cargo.toml
@@ -26,7 +26,6 @@ polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", d
 xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false }
 xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false }
 xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false }
-pallet-xcm-benchmarks = { path = "../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false }
 
 # Cumulus
 cumulus-primitives-core = { path = "../core", default-features = false }
@@ -39,7 +38,6 @@ std = [
 	"frame-support/std",
 	"log/std",
 	"pallet-asset-conversion/std",
-	"pallet-xcm-benchmarks/std",
 	"polkadot-runtime-common/std",
 	"polkadot-runtime-parachains/std",
 	"sp-io/std",
@@ -54,7 +52,6 @@ runtime-benchmarks = [
 	"cumulus-primitives-core/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"pallet-asset-conversion/runtime-benchmarks",
-	"pallet-xcm-benchmarks/runtime-benchmarks",
 	"polkadot-runtime-common/runtime-benchmarks",
 	"polkadot-runtime-parachains/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs
index 0d8921227429c5c1e0f62a4f3c3f93db4ad73bcb..abc391bdcb8ed6ac3682c761e6a0a8d2d0e7f14b 100644
--- a/cumulus/primitives/utility/src/lib.rs
+++ b/cumulus/primitives/utility/src/lib.rs
@@ -760,7 +760,7 @@ mod test_trader {
 	}
 }
 
-/// Implementation of `pallet_xcm_benchmarks::EnsureDelivery` which helps to ensure delivery to the
+/// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the
 /// parent relay chain. Deposits existential deposit for origin (if needed).
 /// Deposits estimated fee to the origin account (if needed).
 /// Allows triggering additional logic for specific `ParaId` (e.g. open HRMP channel) (if needed).
@@ -774,17 +774,22 @@ impl< XcmConfig: xcm_executor::Config, ExistentialDeposit: Get>, PriceForDelivery: PriceForMessageDelivery, - > pallet_xcm_benchmarks::EnsureDelivery + > xcm_builder::EnsureDelivery for ToParentDeliveryHelper { fn ensure_successful_delivery( origin_ref: &Location, - _dest: &Location, + dest: &Location, fee_reason: xcm_executor::traits::FeeReason, ) -> (Option, Option) { use xcm::latest::{MAX_INSTRUCTIONS_TO_DECODE, MAX_ITEMS_IN_ASSETS}; use xcm_executor::{traits::FeeManager, FeesMode}; + // check if the destination is relay/parent + if dest.ne(&Location::parent()) { + return (None, None); + } + let mut fees_mode = None; if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) { // if not waived, we need to set up accounts for paying and receiving fees diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 7190172101cb509f7dd7c19ad25dc6d4d54036e7..028733ce23554967ee46e5ae8f2adebf6da70eb6 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -41,6 +41,7 @@ cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } +cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } [features] runtime-benchmarks = [ diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index df63f683de6b4312a953bbbf9f03862eac7ce451..c46f4da7f6788cf53dfdd6d7be5c221d81562e70 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -151,6 +151,7 @@ pub fn generate_extrinsic_with_pair( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), ); let function = function.into(); @@ -158,7 +159,7 @@ pub fn generate_extrinsic_with_pair( let raw_payload = SignedPayload::from_raw( function.clone(), extra.clone(), - ((), VERSION.spec_version, genesis_block, current_block_hash, (), (), ()), + ((), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| origin.sign(e)); @@ -203,13 +204,16 @@ pub fn validate_block( let mut ext_ext = ext.ext(); let heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; - let executor = WasmExecutor::::builder() - .with_execution_method(WasmExecutionMethod::default()) - .with_max_runtime_instances(1) - .with_runtime_cache_size(2) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .build(); + let executor = WasmExecutor::<( + sp_io::SubstrateHostFunctions, + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + )>::builder() + .with_execution_method(WasmExecutionMethod::default()) + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .build(); executor .uncached_call( diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 5902a62512bed772318145ccdb954ff2dfef4c92..449a8b819bc074e0c891d99d6fa2f42480f56ce6 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -39,6 +39,7 @@ sp-version = { path = 
"../../../substrate/primitives/version", default-features # Cumulus cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } @@ -49,6 +50,7 @@ std = [ "codec/std", "cumulus-pallet-parachain-system/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "frame-executive/std", "frame-support/std", "frame-system-rpc-runtime-api/std", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 6068f895c83bf6adba5e2d7fa68b3aa2b8821204..5ccec8983e91fa2e50a242e191ee79ec8e9655cf 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -331,6 +331,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = @@ -376,7 +377,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 66bc9c6669785a586eb8938224018ae78b5e7169..27273f4e0a8d35cfa1a21f45497c61a9b417d718 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -19,8 +19,8 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } criterion = { version = "0.5.1", features = ["async_tokio"] } jsonrpsee = { version = "0.22", features = ["server"] } rand = "0.8.5" -serde = { version = "1.0.196", features = ["derive"] } -serde_json = "1.0.113" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } tokio = { version = "1.32.0", features = ["macros"] } tracing = "0.1.37" url = "2.4.0" @@ -81,6 +81,7 @@ cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-no cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } pallet-timestamp = { path = "../../../substrate/frame/timestamp" } [dev-dependencies] diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 229bde4d19c54c1077ef8d3f16f85426dd1b0397..3554a383f219e7465f5df875bdf6463b93119f3b 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -67,7 +67,7 @@ use sc_network::{ use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, MultiaddrWithPeerId, NetworkConfiguration, - OffchainWorkerConfig, PruningMode, WasmExecutionMethod, + OffchainWorkerConfig, PruningMode, RpcBatchRequestConfig, WasmExecutionMethod, }, BasePath, ChainSpec as ChainSpecService, Configuration, Error as ServiceError, 
PartialComponents, Role, RpcHandlers, TFullBackend, TFullClient, TaskManager, @@ -112,7 +112,7 @@ pub type AnnounceBlockFn = Arc>) + Send + Sync>; pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { - type ExtendHostFunctions = (); + type ExtendHostFunctions = cumulus_client_service::storage_proof_size::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) @@ -801,6 +801,7 @@ pub fn node_config( rpc_max_subs_per_conn: Default::default(), rpc_port: 9945, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, @@ -893,11 +894,12 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), ); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), extra.clone(), - ((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), ()), + ((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| caller.sign(e)); runtime::UncheckedExtrinsic::new_signed( diff --git a/cumulus/zombienet/tests/0002-pov_recovery.toml b/cumulus/zombienet/tests/0002-pov_recovery.toml index fe42fd4b2f6681154e89d5e7274618a8f307dfab..15a61eba2a0342bce55f805473d2c3ba9b51c7f8 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.toml +++ b/cumulus/zombienet/tests/0002-pov_recovery.toml @@ -4,7 +4,7 @@ default_command = "polkadot" chain = "rococo-local" -[relaychain.genesis.runtimeGenesis.patch.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] # set parameters such that collators only connect to 1 validator as a backing group max_validators_per_core = 1 group_rotation_frequency = 100 # 10 mins diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile index fde9cc6e7cf3cbf3ca21f7d6b489ad2a637d1f42..4bfb73acda05880ef49570594d0769d1e5e4b147 100644 --- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile +++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile @@ -45,7 +45,7 @@ RUN mkdir -p /home/nonroot/bridges-polkadot-sdk COPY ./artifacts/bridges-polkadot-sdk /home/nonroot/bridges-polkadot-sdk # also prepare `generate_hex_encoded_call` for running RUN set -eux; \ - cd /home/nonroot/bridges-polkadot-sdk/bridges/testing/utils/generate_hex_encoded_call; \ + cd /home/nonroot/bridges-polkadot-sdk/bridges/testing/framework/utils/generate_hex_encoded_call; \ npm install # check if executable works in this container diff --git a/docs/RELEASE.md b/docs/RELEASE.md index 6d681d78f367abff313c63adb1559ea6aa06dbc7..e73be2779a99426203e209da846f938c0f73cceb 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -18,10 +18,16 @@ Rococo. To easily refer to a release, it shall be named by its date in the form ## Crate -We try to follow [SemVer 2.0.0](https://semver.org/) as best as possible for versioning our crates. SemVer requires a -piece of software to first declare a public API. The public API of the Polkadot SDK is hereby declared as the sum of all -crates' public APIs. +We try to follow [SemVer 2.0.0](https://semver.org/) as best as possible for versioning our crates. 
The definitions of +`major`, `minor` and `patch` version for Rust crates are slightly altered from their standard for pre `1.0.0` versions. +Quoting [rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html): +>Initial development releases starting with “0.y.z” can treat changes in “y” as a major release, and “z” as a minor +release. “0.0.z” releases are always major changes. This is because Cargo uses the convention that only changes in the +left-most non-zero component are considered incompatible. + +SemVer requires a piece of software to first declare a public API. The public API of the Polkadot SDK +is hereby declared as the sum of all crates' public APIs. Inductively, the public API of our library crates is declared as all public items that are neither: - Inside a `__private` module diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index 93d3e92814cf1307fa7c0ea9f290ed21edae58fb..4eb50bcf96a8932de3fa90748fcfeb3ca7f02a5f 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -3,7 +3,7 @@ flowchart devhub --> polkadot_sdk devhub --> reference_docs - devhub --> tutorial + devhub --> guides polkadot_sdk --> substrate polkadot_sdk --> frame diff --git a/docs/mermaid/outer_runtime_types.mmd b/docs/mermaid/outer_runtime_types.mmd new file mode 100644 index 0000000000000000000000000000000000000000..c909df16af1ffbb697e3d363634fa218005a9c32 --- /dev/null +++ b/docs/mermaid/outer_runtime_types.mmd @@ -0,0 +1,3 @@ +flowchart LR + RuntimeCall --"TryInto"--> PalletCall + PalletCall --"Into"--> RuntimeCall diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 05aced8751aee8286c78ca4f5baeb398a0ac83f6..8b498d407c02574a645420ea3d221b49177d7411 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -17,9 +17,13 @@ workspace = true # Needed for all FRAME-based code parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } -frame = { path = "../../substrate/frame", features = ["experimental", "runtime"] } +frame = { path = "../../substrate/frame", features = [ + "experimental", + "runtime", +] } pallet-examples = { path = "../../substrate/frame/examples" } pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } +pallet-example-offchain-worker = { path = "../../substrate/frame/examples/offchain-worker" } # How we build docs in rust-docs simple-mermaid = "0.1.1" @@ -30,8 +34,12 @@ node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } subkey = { path = "../../substrate/bin/utils/subkey" } +frame-system = { path = "../../substrate/frame/system", default-features = false } +frame-support = { path = "../../substrate/frame/support", default-features = false } +frame-executive = { path = "../../substrate/frame/executive", default-features = false } +pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" } -# Substrate +# Substrate Client sc-network = { path = "../../substrate/client/network" } sc-rpc-api = { path = "../../substrate/client/rpc-api" } sc-rpc = { path = "../../substrate/client/rpc" } @@ -43,6 +51,7 @@ sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } sc-consensus-manual-seal = { path = 
"../../substrate/client/consensus/manual-seal" } sc-consensus-pow = { path = "../../substrate/client/consensus/pow" } + substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } # Cumulus @@ -52,7 +61,19 @@ cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-syst ] } parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } pallet-aura = { path = "../../substrate/frame/aura", default-features = false } + +# Pallets and FRAME internals pallet-timestamp = { path = "../../substrate/frame/timestamp" } +pallet-balances = { path = "../../substrate/frame/balances" } +pallet-assets = { path = "../../substrate/frame/assets" } +pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } +pallet-utility = { path = "../../substrate/frame/utility" } +pallet-multisig = { path = "../../substrate/frame/multisig" } +pallet-proxy = { path = "../../substrate/frame/proxy" } +pallet-authorship = { path = "../../substrate/frame/authorship" } +pallet-collective = { path = "../../substrate/frame/collective" } +pallet-democracy = { path = "../../substrate/frame/democracy" } +pallet-scheduler = { path = "../../substrate/frame/scheduler" } # Primitives sp-io = { path = "../../substrate/primitives/io" } @@ -60,13 +81,11 @@ sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-offchain = { path = "../../substrate/primitives/offchain" } +sp-version = { path = "../../substrate/primitives/version" } # XCM xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } -[dev-dependencies] -parity-scale-codec = "3.6.5" -scale-info = "2.9.0" - [features] experimental = ["pallet-aura/experimental"] diff --git a/docs/sdk/headers/header.html b/docs/sdk/headers/header.html new file mode 100644 index 0000000000000000000000000000000000000000..e28458c4ccc791d9a72613ffb530b685828ea828 --- /dev/null +++ b/docs/sdk/headers/header.html @@ -0,0 +1,144 @@ + + + diff --git a/docs/sdk/headers/theme.css b/docs/sdk/headers/theme.css new file mode 100644 index 0000000000000000000000000000000000000000..a488e15c36b70ee76b14f429434daf0717bc0320 --- /dev/null +++ b/docs/sdk/headers/theme.css @@ -0,0 +1,17 @@ +:root { + --polkadot-pink: #E6007A; + --polkadot-green: #56F39A; + --polkadot-lime: #D3FF33; + --polkadot-cyan: #00B2FF; + --polkadot-purple: #552BBF; +} + +body.sdk-docs { + nav.sidebar>div.sidebar-crate>a>img { + width: 190px; + } + + nav.sidebar { + flex: 0 0 250px; + } +} diff --git a/docs/sdk/headers/toc.html b/docs/sdk/headers/toc.html deleted file mode 100644 index a4a074cb4f3153cb135da8608462d4ecf59144cb..0000000000000000000000000000000000000000 --- a/docs/sdk/headers/toc.html +++ /dev/null @@ -1,54 +0,0 @@ - - diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index 29cdda36ed15ba3fcf964494732fc8364defff45..6a26292482477a712f34b787b5c82b6cebab9947 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -128,8 +128,8 @@ //! //! Recall that within our pallet, (almost) all blocks of code are generic over ``. And, //! because `trait Config: frame_system::Config`, we can get access to all items in `Config` (or -//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how Rust -//! traits and generics work. 
If unfamiliar with this pattern, read
+//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how
+//! Rust traits and generics work. If unfamiliar with this pattern, read
 //! [`crate::reference_docs::trait_based_programming`] before going further.
 //!
 //! Crucially, a typical FRAME runtime contains a `struct Runtime`. The main role of this `struct`
@@ -250,14 +250,16 @@
 // of event is probably not the best.
 //!
 //! With the explanation out of the way, let's see how these components can be added. Both follow a
-//! fairly familiar syntax: normal Rust enums, with an extra `#[frame::event/error]` attribute
-//! attached.
+//! fairly familiar syntax: normal Rust enums, with extra
+//! [`#[frame::event]`](frame::pallet_macros::event) and
+//! [`#[frame::error]`](frame::pallet_macros::error) attributes attached.
 #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)]
 #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)]
 //!
-//! One slightly custom part of this is the `#[pallet::generate_deposit(pub(super) fn
-//! deposit_event)]` part. Without going into too much detail, in order for a pallet to emit events
-//! to the rest of the system, it needs to do two things:
+//! One slightly custom part of this is the [`#[pallet::generate_deposit(pub(super) fn
+//! deposit_event)]`](frame::pallet_macros::generate_deposit) part. Without going into too
+//! much detail, in order for a pallet to emit events to the rest of the system, it needs to do two
+//! things:
 //!
 //! 1. Declare a type in its `Config` that refers to the overarching event type of the runtime. In
 //!    short, by doing this, the pallet is expressing an important bound: `type RuntimeEvent:
@@ -266,11 +268,12 @@
 //!    store it where needed.
 //!
 //! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME
-//! provides a default way of storing events, and this is what `pallet::generate_deposit` is doing.
+//! provides a default way of storing events, and this is what
+//! [`pallet::generate_deposit`](frame::pallet_macros::generate_deposit) is doing.
 #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)]
 //!
 //! > These `Runtime*` types are better explained in
-//! > [`crate::reference_docs::frame_composite_enums`].
+//! > [`crate::reference_docs::frame_runtime_types`].
 //!
 //! Then, we can rewrite the `transfer` dispatchable as such:
 #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_v2)]
@@ -280,12 +283,12 @@
 #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime_v2)]
 //!
 //! In this snippet, the actual `RuntimeEvent` type (right hand side of `type RuntimeEvent =
-//! RuntimeEvent`) is generated by `construct_runtime`. An interesting way to inspect this type is
-//! to see its definition in rust-docs:
+//! RuntimeEvent`) is generated by
+//! [`construct_runtime`](frame::runtime::prelude::construct_runtime). An interesting way to inspect
+//! this type is to see its definition in rust-docs:
 //! [`crate::guides::your_first_pallet::pallet_v2::tests::runtime_v2::RuntimeEvent`].
 //!
 //!
-//!
 //! ## What Next?
 //!
 //! The following topics were used in this guide, but not covered in depth. It is suggested to
@@ -293,7 +296,7 @@
 //!
 //! - [`crate::reference_docs::safe_defensive_programming`].
 //! - [`crate::reference_docs::frame_origin`].
-//! - [`crate::reference_docs::frame_composite_enums`].
+//! - [`crate::reference_docs::frame_runtime_types`].
 //! - The pallet we wrote in this guide was using `dev_mode`, learn more in
 //!   [`frame::pallet_macros::config`].
 //! - Learn more about the individual pallet items/macros, such as event and errors and call, in
diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs
index 075d9ddaffe5bd95bafa4ca1d06667c253d4a21b..e211476d2514419a3d0889ef426817342155c178 100644
--- a/docs/sdk/src/lib.rs
+++ b/docs/sdk/src/lib.rs
@@ -15,7 +15,7 @@
 //! - Start by learning about the [`polkadot_sdk`], its structure and context.
 //! - Then, head over to the [`guides`]. This module contains in-depth guides about the most important
 //!   user-journeys of the Polkadot SDK.
-//! - Whilst reading the guides, you might find back-links to [`crate::reference_docs`].
+//! - Whilst reading the guides, you might find back-links to [`reference_docs`].
 //! - Finally, is the parent website of this crate that contains the
 //!   list of further tools related to the Polkadot SDK.
 //!
@@ -25,6 +25,11 @@
 #![doc = simple_mermaid::mermaid!("../../mermaid/IA.mmd")]
 #![warn(rustdoc::broken_intra_doc_links)]
 #![warn(rustdoc::private_intra_doc_links)]
+#![doc(html_favicon_url = "https://polkadot.network/favicon-32x32.png")]
+#![doc(
+	html_logo_url = "https://europe1.discourse-cdn.com/standard21/uploads/polkadot2/original/1X/eb57081e2bb7c39e5fcb1a98b443e423fa4448ae.svg"
+)]
+#![doc(issue_tracker_base_url = "https://github.com/paritytech/polkadot-sdk/issues")]
 
 /// Meta information about this crate, how it is built, what principles dictate its evolution and
 /// how one can contribute to it.
diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs
index 7ecf8b0adfd3a2038bd32b6d6b830c71782cd1b2..fcdcea9934bb6b8cf7ee6ec090ebd0939888238c 100644
--- a/docs/sdk/src/meta_contributing.rs
+++ b/docs/sdk/src/meta_contributing.rs
@@ -101,7 +101,7 @@
 //! * Before even getting started, what is with all of this `<T: Config>`? We link to
 //!   [`crate::reference_docs::trait_based_programming`].
 //! * First, the name. Why is this called `pallet::call`? This goes back to `enum Call`, which is
-//!   explained in [`crate::reference_docs::frame_composite_enums`]. Build on top of this!
+//!   explained in [`crate::reference_docs::frame_runtime_types`]. Build on top of this!
 //! * Then, what is `origin`? Just an account id? [`crate::reference_docs::frame_origin`].
 //! * Then, what is `DispatchResult`? Why is this called *dispatch*? Probably something that can be
 //!   explained in the documentation of [`frame::prelude::DispatchResult`].
@@ -138,7 +138,9 @@
 //! injected, run:
 //!
 //! ```sh
-//! SKIP_WASM_BUILD=1 RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/toc.html" cargo doc -p polkadot-sdk-docs --no-deps --open
+//! SKIP_WASM_BUILD=1 \
+//! RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/header.html --extend-css $(pwd)/docs/sdk/headers/theme.css --default-theme=ayu" \
+//! cargo doc -p polkadot-sdk-docs --no-deps --open
 //! ```
 //!
 //! If even faster build time for docs is needed, you can temporarily remove most of the
diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs
index c9eba7d64bd46b51230c1b06a96c6b89ffddd73d..f178fc82bf4ba350ffe67b8a237319f8542d686b 100644
--- a/docs/sdk/src/polkadot_sdk/frame_runtime.rs
+++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs
@@ -87,93 +87,89 @@
 //! * writing a runtime in pure Rust, as done in [this template](https://github.com/JoshOrndorff/frameless-node-template).
 //! * writing a runtime in AssemblyScript, as explored in [this project](https://github.com/LimeChain/subsembly).
 
-#[cfg(test)]
-mod tests {
-	use frame::prelude::*;
+use frame::prelude::*;
 
-	/// A FRAME based pallet. This `mod` is the entry point for everything else. All
-	/// `#[pallet::xxx]` macros must be defined in this `mod`. Although, frame also provides an
-	/// experimental feature to break these parts into different `mod`s. See [`pallet_examples`] for
-	/// more.
-	#[docify::export]
-	#[frame::pallet(dev_mode)]
-	pub mod pallet {
-		use super::*;
+/// A FRAME based pallet. This `mod` is the entry point for everything else. All
+/// `#[pallet::xxx]` macros must be defined in this `mod`. Although, frame also provides an
+/// experimental feature to break these parts into different `mod`s. See [`pallet_examples`] for
+/// more.
+#[docify::export]
+#[frame::pallet(dev_mode)]
+pub mod pallet {
+	use super::*;
 
-		/// The configuration trait of a pallet. Mandatory. Allows a pallet to receive types at a
-		/// later point from the runtime that wishes to contain it. It allows the pallet to be
-		/// parameterized over both types and values.
-		#[pallet::config]
-		pub trait Config: frame_system::Config {
-			/// A type that is not known now, but the runtime that will contain this pallet will
-			/// know it later, therefore we define it here as an associated type.
-			type RuntimeEvent: IsType<<Self as frame_system::Config>::RuntimeEvent>
-				+ From<Event<Self>>;
+	/// The configuration trait of a pallet. Mandatory. Allows a pallet to receive types at a
+	/// later point from the runtime that wishes to contain it. It allows the pallet to be
+	/// parameterized over both types and values.
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// A type that is not known now, but the runtime that will contain this pallet will
+		/// know it later, therefore we define it here as an associated type.
+		type RuntimeEvent: IsType<<Self as frame_system::Config>::RuntimeEvent> + From<Event<Self>>;
 
-			/// A parameterize-able value that we receive later via the `Get<_>` trait.
-			type ValueParameter: Get<u32>;
+		/// A parameterize-able value that we receive later via the `Get<_>` trait.
+		type ValueParameter: Get<u32>;
 
-			/// Similar to [`Config::ValueParameter`], but using `const`. Both are functionally
-			/// equal, but offer different tradeoffs.
-			const ANOTHER_VALUE_PARAMETER: u32;
-		}
+		/// Similar to [`Config::ValueParameter`], but using `const`. Both are functionally
+		/// equal, but offer different tradeoffs.
+		const ANOTHER_VALUE_PARAMETER: u32;
+	}
 
-		/// A mandatory struct in each pallet. All functions callable by external users (aka.
-		/// transactions) must be attached to this type (see [`frame::pallet_macros::call`]). For
-		/// convenience, internal (private) functions can also be attached to this type.
-		#[pallet::pallet]
-		pub struct Pallet<T>(PhantomData<T>);
+	/// A mandatory struct in each pallet. All functions callable by external users (aka.
+	/// transactions) must be attached to this type (see [`frame::pallet_macros::call`]). For
+	/// convenience, internal (private) functions can also be attached to this type.
+	#[pallet::pallet]
+	pub struct Pallet<T>(PhantomData<T>);
 
-		/// The events that this pallet can emit.
-		#[pallet::event]
-		pub enum Event<T: Config> {}
+	/// The events that this pallet can emit.
+	#[pallet::event]
+	pub enum Event<T: Config> {}
 
-		/// A storage item that this pallet contains. This will be part of the state root trie/root
This will be part of the state root trie/root + /// of the blockchain. + #[pallet::storage] + pub type Value = StorageValue; - /// All *dispatchable* call functions (aka. transactions) are attached to `Pallet` in a - /// `impl` block. - #[pallet::call] - impl Pallet { - /// This will be callable by external users, and has two u32s as a parameter. - pub fn some_dispatchable( - _origin: OriginFor, - _param: u32, - _other_para: u32, - ) -> DispatchResult { - Ok(()) - } + /// All *dispatchable* call functions (aka. transactions) are attached to `Pallet` in a + /// `impl` block. + #[pallet::call] + impl Pallet { + /// This will be callable by external users, and has two u32s as a parameter. + pub fn some_dispatchable( + _origin: OriginFor, + _param: u32, + _other_para: u32, + ) -> DispatchResult { + Ok(()) } } +} - /// A simple runtime that contains the above pallet and `frame_system`, the mandatory pallet of - /// all runtimes. This runtime is for testing, but it shares a lot of similarities with a *real* - /// runtime. - #[docify::export] - pub mod runtime { - use super::pallet as pallet_example; - use frame::{prelude::*, testing_prelude::*}; - - // The major macro that amalgamates pallets into `enum Runtime` - construct_runtime!( - pub enum Runtime { - System: frame_system, - Example: pallet_example, - } - ); +/// A simple runtime that contains the above pallet and `frame_system`, the mandatory pallet of +/// all runtimes. This runtime is for testing, but it shares a lot of similarities with a *real* +/// runtime. +#[docify::export] +pub mod runtime { + use super::pallet as pallet_example; + use frame::{prelude::*, testing_prelude::*}; - // These `impl` blocks specify the parameters of each pallet's `trait Config`. - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - impl frame_system::Config for Runtime { - type Block = MockBlock; + // The major macro that amalgamates pallets into `enum Runtime` + construct_runtime!( + pub enum Runtime { + System: frame_system, + Example: pallet_example, } + ); - impl pallet_example::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValueParameter = ConstU32<42>; - const ANOTHER_VALUE_PARAMETER: u32 = 42; - } + // These `impl` blocks specify the parameters of each pallet's `trait Config`. + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_example::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValueParameter = ConstU32<42>; + const ANOTHER_VALUE_PARAMETER: u32 = 42; } } diff --git a/docs/sdk/src/reference_docs/development_environment_advice.rs b/docs/sdk/src/reference_docs/development_environment_advice.rs index 4317695979367b32bfb673d2d368399c5ca35faa..21bbe78836c44b8afd70cab68e4b9b2f929fb4a0 100644 --- a/docs/sdk/src/reference_docs/development_environment_advice.rs +++ b/docs/sdk/src/reference_docs/development_environment_advice.rs @@ -111,3 +111,74 @@ //! If you have a powerful remote server available, you may consider using //! [cargo-remote](https://github.com/sgeisler/cargo-remote) to execute cargo commands on it, //! freeing up local resources for other tasks like `rust-analyzer`. +//! +//! When using `cargo-remote`, you can configure your editor to perform the typical +//! "check-on-save" remotely as well. The configuration for VSCode is as follows: +//! +//! ```json +//! { +//! "rust-analyzer.cargo.buildScripts.overrideCommand": [ +//! 
"cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! ], +//! "rust-analyzer.check.overrideCommand": [ +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--workspace", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! ], +//! } +//! ``` +//! +//! //! and the same in Lua for `neovim/nvim-lspconfig`: +//! +//! ```lua +//! ["rust-analyzer"] = { +//! cargo = { +//! buildScripts = { +//! overrideCommand = { +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! }, +//! }, +//! check = { +//! overrideCommand = { +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--workspace", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! }, +//! }, +//! }, +//! }, +//! ``` diff --git a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs index 9008f8f835f5d6cd1705c9dd4e4e4e78fd3aa2d9..8c8568a228fad140115e68feb3eec717ef30170c 100644 --- a/docs/sdk/src/reference_docs/extrinsic_encoding.rs +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -127,7 +127,7 @@ //! runtimes, a call is represented as an enum of enums, where the outer enum represents the FRAME //! pallet being called, and the inner enum represents the call being made within that pallet, and //! any arguments to it. Read more about the call enum -//! [here][crate::reference_docs::frame_composite_enums]. +//! [here][crate::reference_docs::frame_runtime_types]. //! //! FRAME `Call` enums are automatically generated, and end up looking something like this: #![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", call_data)] diff --git a/docs/sdk/src/reference_docs/frame_composite_enums.rs b/docs/sdk/src/reference_docs/frame_composite_enums.rs deleted file mode 100644 index 6051cd534467672b9831187ef5c8b814712f7d18..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/frame_composite_enums.rs +++ /dev/null @@ -1 +0,0 @@ -//! # FRAME Composite Enums diff --git a/docs/sdk/src/reference_docs/frame_offchain_workers.rs b/docs/sdk/src/reference_docs/frame_offchain_workers.rs new file mode 100644 index 0000000000000000000000000000000000000000..7999707e5ee018c4bb7634e7a506ff8fee8fa8ac --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_offchain_workers.rs @@ -0,0 +1,115 @@ +//! # Offchain Workers +//! +//! This reference document explains how offchain workers work in Substrate and FRAME. The main +//! focus is upon FRAME's implementation of this functionality. Nonetheless, offchain workers are a +//! Substrate-provided feature and can be used with possible alternatives to [`frame`] as well. +//! +//! Offchain workers are a commonly misunderstood topic, therefore we explain them bottom-up, +//! starting at the fundamentals and then describing the developer interface. +//! +//! ## Context +//! +//! Recall from [`crate::reference_docs::wasm_meta_protocol`] that the node and the runtime +//! communicate with one another via host functions and runtime APIs. Many of these interactions +//! 
contribute to the actual state transition of the blockchain. For example [`sp_api::Core`] is the +//! main runtime API that is called to execute new blocks. +//! +//! Offchain workers are in principle no different: they are exposed as a runtime API by the +//! wasm blob ([`sp_offchain::OffchainWorkerApi`]), and the node software calls into it when it +//! deems fit. But, crucially, this API call is different in that: +//! +//! 1. It can have no impact on the state, i.e. it is _OFF (the) CHAIN_. If any state is altered +//! during the execution of this API call, it is discarded. +//! 2. It has access to an extended set of host functions that allow the wasm blob to do more. For +//! example, call into HTTP requests. +//! +//! > The main way through which an offchain worker can interact with the state is by submitting an +//! > extrinsic to the chain. This is the ONLY way to alter the state from an offchain worker. +//! > [`pallet_example_offchain_worker`] provides an example of this. +//! +//! +//! Given the "Off Chain" nature of this API, it is important to remember that calling this API is +//! entirely optional. Some nodes might call into it, some might not, and this has no impact on +//! the execution of your blockchain, because no state is altered regardless of whether the +//! offchain worker API runs. +//! +//! Substrate's CLI allows some degree of configuration about this, allowing node operators to +//! specify when they want to run the offchain worker API. See +//! [`sc_cli::RunCmd::offchain_worker_params`]. +//! +//! ## Nondeterministic Execution +//! +//! Needless to say, given the above description, the code in your offchain worker API can be +//! nondeterministic, as it is not part of the blockchain's STF, so it can be executed at unknown +//! times, by unknown nodes, and has no impact on the state. This is why an HTTP +//! ([`sp_runtime::offchain::http`]) API is readily provided to the offchain worker APIs: there +//! is no need for determinism in this context. +//! +//! > A common mistake here is for novice developers to see this HTTP API, and imagine that +//! > `polkadot-sdk` somehow magically solved the determinism problem in blockchains, and now a blockchain +//! > can make HTTP calls and it will all work. This is absolutely NOT the case. An HTTP call made +//! > by the offchain worker is non-deterministic by design. Blockchains can't, and never will be +//! > able to, perform non-deterministic operations such as making HTTP calls to a foreign server. +//! +//! ## FRAME's API +//! +//! [`frame`] provides a simple API through which pallets can define offchain worker functions. This +//! is part of [`frame::traits::Hooks`], which is implemented as a part of +//! [`frame::pallet_macros::hooks`]. +//! +//! ``` +//! +//! #[frame::pallet] +//! pub mod pallet { +//! use frame::prelude::*; +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config {} +//! +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! +//! #[pallet::hooks] +//! impl Hooks> for Pallet { +//! fn offchain_worker(block_number: BlockNumberFor) { +//! // ... +//! } +//! } +//! } +//! ``` +//! +//! Additionally, [`sp_runtime::offchain`] provides a set of utilities that can be used to moderate +//! the execution of offchain workers. +//! +//! ## Think Twice: Why Use Substrate's Offchain Workers? +//! +//! Consider the fact that, in principle, offchain worker code written using the above API is no +//! 
different than an equivalent written with an _actual offchain interaction library_, such as +//! [Polkadot-JS](https://polkadot.js.org/docs/), or any of the other ones listed [here](https://github.com/substrate-developer-hub/awesome-substrate?tab=readme-ov-file#client-libraries). +//! +//! They can both read from the state, and have no means of updating the state, other than the route +//! of submitting an extrinsic to the chain. Therefore, it is worth thinking twice before embedding +//! logic as part of Substrate's offchain worker API. Does it have to be there? Can it not be a +//! simple, actual offchain application that lives outside of the chain's WASM blob? +//! +//! Some of the reasons why you might want to do the opposite, and actually embed an offchain worker +//! API into the WASM blob are: +//! +//! * Accessing the state is easier within the `offchain_worker` function, as it is already a part +//! of the runtime, and [`frame::pallet_macros::storage`] provides all the tools needed to read +//! the state. Other client libraries might provide varying degrees of capability here. +//! * It will be updated in synchrony with the runtime. A Substrate offchain application is part +//! of the same WASM blob, and is therefore guaranteed to be up to date. +//! +//! For example, imagine you have modified a storage item to have a new type. This will possibly +//! require a [`crate::reference_docs::frame_runtime_upgrades_and_migrations`], and any offchain +//! code, such as a Polkadot-JS application, will have to be updated to reflect this change, +//! whereas the WASM offchain worker code is guaranteed to already be updated, or else the runtime +//! code will not even compile. +//! +//! +//! ## Further References +//! +//! - +//! - +//! - [Offchain worker example](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/examples/offchain-worker) diff --git a/docs/sdk/src/reference_docs/frame_origin.rs b/docs/sdk/src/reference_docs/frame_origin.rs index a4078377cd77dad6b3aac78ba6acdddda14251a8..49533157b014d77b3766cbfc8c3353397e0e48bf 100644 --- a/docs/sdk/src/reference_docs/frame_origin.rs +++ b/docs/sdk/src/reference_docs/frame_origin.rs @@ -1,14 +1,260 @@ //! # FRAME Origin //! -//! Notes: -//! -//! - Def talk about account abstraction and how it is a solved issue in frame. See Gav's talk in -//! Protocol Berg 2023 -//! - system's raw origin, how it is amalgamated with other origins into one type -//! [`frame_composite_enums`] -//! - signed origin -//! - unsigned origin, link to [`fee_less_runtime`] -//! - Root origin, how no one can obtain it. -//! - Abstract origin: how FRAME allows you to express "origin is 2/3 of the this body or 1/2 of -//! that body or half of the token holders". -//! - `type CustomOrigin: EnsureOrigin<_>` in pallets. +//! Let's start by clarifying a common misconception about Origin: +//! +//! **ORIGIN IS NOT AN ACCOUNT ID**. +//! +//! FRAME's origin abstractions allow you to convey meanings far beyond just an account-id being the +//! caller of an extrinsic. Nonetheless, an account-id having signed an extrinsic is one of the +//! meanings that an origin can convey. This is the commonly used [`frame_system::ensure_signed`], +//! where the return value happens to be an account-id. +//! +//! Instead, let's establish the following as the correct definition of an origin: +//! +//! > The origin type represents the privilege level of the caller of an extrinsic. +//! +//! 
That is, an extrinsic, through checking the origin, can *express what privilege level it wishes +//! to impose on the caller of the extrinsic*. One of those checks can be as simple as "*any account +//! that has signed a statement can pass*". +//! +//! But the origin system can also express more abstract and complicated privilege levels. For +//! example: +//! +//! * If the majority of token holders agreed upon this. This is more or less what the +//! [`pallet_democracy`] does under the hood ([reference](https://github.com/paritytech/polkadot-sdk/blob/edd95b3749754d2ed0c5738588e872c87be91624/substrate/frame/democracy/src/lib.rs#L1603-L1633)). +//! * If a specific ratio of an instance of [`pallet_collective`]/DAO agrees upon this. +//! * If another consensus system, for example a bridged network or a parachain, agrees upon this. +//! * If the majority of validator/authority set agrees upon this[^1]. +//! * If caller holds a particular NFT. +//! +//! and many more. +//! +//! ## Context +//! +//! First, let's look at where the `origin` type is encountered in a typical pallet. The `origin: +//! OriginFor` has to be the first argument of any given callable extrinsic in FRAME: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", call_simple)] +//! +//! Typically, the code of an extrinsic starts with an origin check, such as +//! [`frame_system::ensure_signed`]. +//! +//! Note that [`OriginFor`](frame_system::pallet_prelude::OriginFor) is merely a shorthand for +//! [`frame_system::Config::RuntimeOrigin`]. Given the name prefix `Runtime`, we can learn that +//! `RuntimeOrigin` is similar to `RuntimeCall` and others, a runtime composite enum that is +//! amalgamated at the runtime level. Read [`crate::reference_docs::frame_runtime_types`] to +//! familiarize yourself with these types. +//! +//! To understand this better, we will next create a pallet with a custom origin, which will add a +//! new variant to `RuntimeOrigin`. +//! +//! ## Adding Custom Pallet Origin to the Runtime +//! +//! For example, given a pallet that defines the following custom origin: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin)] +//! +//! And a runtime with the following pallets: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", runtime_exp)] +//! +//! The type [`crate::reference_docs::frame_origin::runtime_for_origin::RuntimeOrigin`] is expanded. +//! This `RuntimeOrigin` contains a variant for the [`frame_system::RawOrigin`] and the custom +//! origin of the pallet. +//! +//! > Notice how the [`frame_system::ensure_signed`] is nothing more than a `match` statement. If +//! > you want to know where the actual origin of an extrinsic is set (and the signature +//! > verification happens, if any), see +//! > [`sp_runtime::generic::CheckedExtrinsic#trait-implementations`], specifically +//! > [`sp_runtime::traits::Applyable`]'s implementation. +//! +//! ## Asserting on a Custom Internal Origin +//! +//! In order to assert on a custom origin that is defined within your pallet, we need a way to first +//! convert the `::RuntimeOrigin` into the local `enum Origin` of the +//! current pallet. This is a common process that is explained in +//! [`crate::reference_docs::frame_runtime_types# +//! adding-further-constraints-to-runtime-composite-enums`]. +//! +//! We use the same process here to express that `RuntimeOrigin` has a number of additional bounds, +//! as follows. +//! +//! 1. Defining a custom `RuntimeOrigin` with further bounds in the pallet. 
+#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin_bound)] +//! +//! 2. Using it in the pallet. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin_usage)] +//! +//! ## Asserting on a Custom External Origin +//! +//! Very often, a pallet wants to have a parameterized origin that is **NOT** defined within the +//! pallet. In other words, a pallet wants to delegate an origin check to something that is +//! specified later at the runtime level. Like many other parameterizations in FRAME, this implies +//! adding a new associated type to `trait Config`. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_def)] +//! +//! Then, within the pallet, we can simply use this "unknown" origin check type: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_usage)] +//! +//! Finally, at the runtime, any implementation of [`frame::traits::EnsureOrigin`] can be passed. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_provide)] +//! +//! Indeed, some of these implementations of [`frame::traits::EnsureOrigin`] are similar to the ones +//! that we know about: [`frame::runtime::prelude::EnsureSigned`], +//! [`frame::runtime::prelude::EnsureSignedBy`], [`frame::runtime::prelude::EnsureRoot`], +//! [`frame::runtime::prelude::EnsureNone`], etc. But, there are also many more that are not known +//! to us, and are defined in other pallets. +//! +//! For example, [`pallet_collective`] defines [`pallet_collective::EnsureMember`] and +//! [`pallet_collective::EnsureProportionMoreThan`] and many more, which is exactly what we alluded +//! to earlier in this document. +//! +//! Make sure to check the full list of [implementors of +//! `EnsureOrigin`](frame::traits::EnsureOrigin#implementors) for more inspiration. +//! +//! ## Obtaining Abstract Origins +//! +//! So far we have learned that FRAME pallets can assert on custom and abstract origin types, +//! whether they are defined within the pallet or not. But how can we obtain these abstract origins? +//! +//! > All extrinsics that come from the outer world can generally only be obtained as either +//! > `signed` or `none` origin. +//! +//! Generally, these abstract origins are only obtained within the runtime, when a call is +//! dispatched within the runtime. +//! +//! ## Further References +//! +//! - [Gavin Wood's speech about FRAME features at Protocol Berg 2023.](https://youtu.be/j7b8Upipmeg?si=83_XUgYuJxMwWX4g&t=195) +//! - [A related StackExchange question.](https://substrate.stackexchange.com/questions/10992/how-do-you-find-the-public-key-for-the-medium-spender-track-origin) +//! +//! [^1]: Inherents are essentially unsigned extrinsics that need an [`frame_system::ensure_none`] +//! origin check, and through the virtue of being an inherent, are agreed upon by all validators. 
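+//!
+//! ## Appendix: A Hand-Written `EnsureOrigin`
+//!
+//! To make the "external origin check" above more tangible, the following is a rough sketch of
+//! what a hand-written implementation of [`frame::traits::EnsureOrigin`] for the custom `Origin`
+//! enum of this document could look like. `EnsureValidatorSet` is a made-up name used purely for
+//! illustration, and the trait shape shown here is simplified; consult
+//! [`frame::traits::EnsureOrigin`] for the authoritative definition.
+//!
+//! ```ignore
+//! pub struct EnsureValidatorSet;
+//! impl<O: Into<Result<Origin, O>> + From<Origin>> EnsureOrigin<O> for EnsureValidatorSet {
+//!     // the origin check yields nothing upon success.
+//!     type Success = ();
+//!     fn try_origin(o: O) -> Result<Self::Success, O> {
+//!         // convert the outer origin into the local one, and accept only `ValidatorSet`.
+//!         o.into().and_then(|o| match o {
+//!             Origin::ValidatorSet => Ok(()),
+//!             r => Err(O::from(r)),
+//!         })
+//!     }
+//! }
+//! ```
+//!
+//! Such a type could then be provided to a pallet like `pallet_with_external_origin` below as its
+//! `ExternalOrigin`, exactly like the `EnsureSigned` shown in the snippets.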
+ +use frame::prelude::*; + +#[frame::pallet(dev_mode)] +pub mod pallet_for_origin { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(call_simple)] + #[pallet::call] + impl Pallet { + pub fn do_something(_origin: OriginFor) -> DispatchResult { + // ^^^^^^^^^^^^^^^^^^^^^ + todo!(); + } + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_custom_origin { + use super::*; + + #[docify::export(custom_origin_bound)] + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeOrigin: From<::RuntimeOrigin> + + Into::RuntimeOrigin>>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(custom_origin)] + /// A dummy custom origin. + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub enum Origin { + /// If all holders of a particular NFT have agreed upon this. + AllNftHolders, + /// If all validators have agreed upon this. + ValidatorSet, + } + + #[docify::export(custom_origin_usage)] + #[pallet::call] + impl Pallet { + pub fn only_validators(origin: OriginFor) -> DispatchResult { + // first, we convert from `::RuntimeOrigin` to `::RuntimeOrigin` + let local_runtime_origin = <::RuntimeOrigin as From< + ::RuntimeOrigin, + >>::from(origin); + // then we convert to `origin`, if possible + let local_origin = + local_runtime_origin.into().map_err(|_| "invalid origin type provided")?; + ensure!(matches!(local_origin, Origin::ValidatorSet), "Not authorized"); + todo!(); + } + } +} + +pub mod runtime_for_origin { + use super::pallet_with_custom_origin; + use frame::{runtime::prelude::*, testing_prelude::*}; + + #[docify::export(runtime_exp)] + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithCustomOrigin: pallet_with_custom_origin, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_with_custom_origin::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_external_origin { + use super::*; + #[docify::export(external_origin_def)] + #[pallet::config] + pub trait Config: frame_system::Config { + type ExternalOrigin: EnsureOrigin; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(external_origin_usage)] + #[pallet::call] + impl Pallet { + pub fn externally_checked_ext(origin: OriginFor) -> DispatchResult { + let _ = T::ExternalOrigin::ensure_origin(origin)?; + todo!(); + } + } +} + +pub mod runtime_for_external_origin { + use super::*; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithExternalOrigin: pallet_with_external_origin, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + #[docify::export(external_origin_provide)] + impl pallet_with_external_origin::Config for Runtime { + type ExternalOrigin = EnsureSigned<::AccountId>; + } +} diff --git a/docs/sdk/src/reference_docs/frame_pallet_coupling.rs b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs new file mode 100644 index 0000000000000000000000000000000000000000..942717ecfb2a12e44d45173c1631f77907f9dbaa --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs @@ -0,0 
+1,296 @@ +//! # FRAME Pallet Coupling +//! +//! This reference document explains how FRAME pallets can be combined to interact together. +//! +//! It is suggested to re-read [`crate::polkadot_sdk::frame_runtime`], notably the information +//! around [`frame::pallet_macros::config`]. Recall that: +//! +//! > Configuration trait of a pallet: It allows a pallet to receive types at a later +//! > point from the runtime that wishes to contain it. It allows the pallet to be parameterized +//! > over both types and values. +//! +//! ## Context, Background +//! +//! FRAME pallets, as described in [`crate::polkadot_sdk::frame_runtime`] are: +//! +//! > A pallet is a unit of encapsulated logic. It has a clearly defined responsibility and can be +//! linked to other pallets. +//! +//! That is to say: +//! +//! * *encapsulated*: Ideally, a FRAME pallet contains encapsulated logic which has clear +//! boundaries. It is generally a bad idea to build a single monolithic pallet that does multiple +//! things, such as handling currencies, identities and staking all at the same time. +//! * *linked to other pallets*: But, adhering extensively to the above also hinders the ability to +//! write useful applications. Pallets often need to work with each other, communicate and use +//! each other's functionalities. +//! +//! The broad principle that allows pallets to be linked together is the same way through which a +//! pallet uses its `Config` trait to receive types and values from the runtime that contains it. +//! +//! There are generally two ways to achieve this: +//! +//! 1. Tight coupling pallets +//! 2. Loose coupling pallets +//! +//! To explain the difference between the two, consider two pallets, `A` and `B`. In both cases, `A` +//! wants to use some functionality exposed by `B`. +//! +//! When tightly coupling pallets, `A` can only exist in a runtime if `B` is also present in the +//! same runtime. That is, `A` is expressing that it can only work if `B` is present. +//! +//! This translates to the following Rust code: +//! +//! ``` +//! trait Pallet_B_Config {} +//! trait Pallet_A_Config: Pallet_B_Config {} +//! ``` +//! +//! By contrast, when pallets are loosely coupled, `A` expresses that some functionality, expressed via +//! a trait `F`, needs to be fulfilled. This trait is then implemented by `B`, and the two pallets +//! are linked together at the runtime level. This means that `A` only relies on the implementation +//! of `F`, which may be `B`, or another implementation of `F`. +//! +//! This translates to the following Rust code: +//! +//! ``` +//! trait F {} +//! trait Pallet_A_Config { +//! type F: F; +//! } +//! // Pallet_B will implement and fulfill `F`. +//! ``` +//! +//! ## Example +//! +//! Consider the following example, in which `pallet-foo` needs another pallet to provide the block +//! author to it, and `pallet-author` which has access to this information. +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", pallet_foo)] +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", pallet_author)] +//! +//! ### Tight Coupling Pallets +//! +//! To tightly couple `pallet-foo` and `pallet-author`, we use Rust's supertrait system. When a +//! pallet makes its own `trait Config` be bounded by another pallet's `trait Config`, it is +//! expressing two things: +//! +//! 1. that it can only exist in a runtime if the other pallet is also present. +//! 2. that it can use the other pallet's functionality. +//! +//! 
`pallet-foo`'s `Config` would then look like: +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", tight_config)] +//! +//! And `pallet-foo` can use the method exposed by `pallet_author::Pallet` directly: +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", tight_usage)] +//! +//! +//! ### Loosely Coupling Pallets +//! +//! If `pallet-foo` wants to *not* rely on `pallet-author` directly, it can leverage its +//! `Config`'s associated types. First, we need a trait to express the functionality that +//! `pallet-foo` wants to obtain: +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", AuthorProvider)] +//! +//! > We sometimes refer to such traits that help two pallets interact as "glue traits". +//! +//! Next, `pallet-foo` states that it needs this trait to be provided to it, at the runtime level, +//! via an associated type: +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", loose_config)] +//! +//! Then, `pallet-foo` can use this trait to obtain the block author, without knowing where it comes +//! from: +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", loose_usage)] +//! +//! Then, if `pallet-author` implements this glue-trait: +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", pallet_author_provider)] +//! +//! And upon the creation of the runtime, the two pallets are linked together as such: +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", runtime_author_provider)] +//! +//! Crucially, when using loose coupling, we gain the flexibility of providing different +//! implementations of `AuthorProvider`, such that different users of a `pallet-foo` can use +//! different ones, without any code change being needed. For example, in the code snippets of this +//! module, you can find [`OtherAuthorProvider`] which is an alternative implementation of +//! [`AuthorProvider`]. +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", other_author_provider)] +//! +//! A common pattern in polkadot-sdk is to provide an implementation of such glue traits for the unit +//! type as a "default/test behavior". +#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", unit_author_provider)] +//! +//! ## Frame System +//! +//! With the above information in context, we can conclude that **`frame_system` is a special pallet +//! that is tightly coupled with every other pallet**. This is because it provides the fundamental +//! system functionality that every pallet needs, such as some types like +//! [`frame::prelude::frame_system::Config::AccountId`], +//! [`frame::prelude::frame_system::Config::Hash`], and some functionality such as block number, +//! etc. +//! +//! ## Recap +//! +//! To recap, consider the following rules of thumb: +//! +//! * In all cases, try to break big pallets apart with clear boundaries of responsibility. In +//! general, it is easier to reason about multiple pallets if they only communicate together via a +//! known trait, rather than having access to all of each other's public items, such as storage and +//! dispatchables. +//! * If a group of pallets are meant to work together, but are not foreseen to be generalized, +//! or used by others, consider tightly coupling pallets, *if it simplifies the development*. +//! * If a pallet needs a functionality provided by another pallet, but multiple implementations can +//! be foreseen, consider loosely coupling pallets. +//! +//! 
For example, all pallets in `polkadot-sdk` that needed to work with currencies could have been +//! tightly coupled with [`pallet_balances`]. But, `polkadot-sdk` also provides [`pallet_assets`] +//! (and more implementations by the community), therefore all pallets use traits to loosely couple +//! with balances or assets pallet. More on this in [`crate::reference_docs::frame_currency`]. +//! +//! ## Further References +//! +//! - +//! - +//! +//! [`AuthorProvider`]: crate::reference_docs::frame_pallet_coupling::AuthorProvider +//! [`OtherAuthorProvider`]: crate::reference_docs::frame_pallet_coupling::OtherAuthorProvider + +#![allow(unused)] + +use frame::prelude::*; + +#[docify::export] +#[frame::pallet] +pub mod pallet_foo { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + impl Pallet { + fn do_stuff_with_author() { + // needs block author here + } + } +} + +#[docify::export] +#[frame::pallet] +pub mod pallet_author { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + impl Pallet { + pub fn author() -> T::AccountId { + todo!("somehow has access to the block author and can return it here") + } + } +} + +#[frame::pallet] +pub mod pallet_foo_tight { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(tight_config)] + /// This pallet can only live in a runtime that has both `frame_system` and `pallet_author`. + #[pallet::config] + pub trait Config: frame_system::Config + pallet_author::Config {} + + #[docify::export(tight_usage)] + impl Pallet { + // anywhere in `pallet-foo`, we can call into `pallet-author` directly, namely because + // `T: pallet_author::Config` + fn do_stuff_with_author() { + let _ = pallet_author::Pallet::::author(); + } + } +} + +#[docify::export] +/// Abstraction over "something that can provide the block author". +pub trait AuthorProvider { + fn author() -> AccountId; +} + +#[frame::pallet] +pub mod pallet_foo_loose { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(loose_config)] + #[pallet::config] + pub trait Config: frame_system::Config { + /// This pallet relies on the existence of something that implements [`AuthorProvider`], + /// which may or may not be `pallet-author`. 
+ type AuthorProvider: AuthorProvider; + } + + #[docify::export(loose_usage)] + impl Pallet { + fn do_stuff_with_author() { + let _ = T::AuthorProvider::author(); + } + } +} + +#[docify::export(pallet_author_provider)] +impl AuthorProvider for pallet_author::Pallet { + fn author() -> T::AccountId { + pallet_author::Pallet::::author() + } +} + +pub struct OtherAuthorProvider; + +#[docify::export(other_author_provider)] +impl AuthorProvider for OtherAuthorProvider { + fn author() -> AccountId { + todo!("somehow get the block author here") + } +} + +#[docify::export(unit_author_provider)] +impl AuthorProvider for () { + fn author() -> AccountId { + todo!("somehow get the block author here") + } +} + +pub mod runtime { + use super::*; + use cumulus_pallet_aura_ext::pallet; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletFoo: pallet_foo_loose, + PalletAuthor: pallet_author, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_author::Config for Runtime {} + + #[docify::export(runtime_author_provider)] + impl pallet_foo_loose::Config for Runtime { + type AuthorProvider = pallet_author::Pallet; + // which is also equivalent to + // type AuthorProvider = PalletAuthor; + } +} diff --git a/docs/sdk/src/reference_docs/frame_runtime_migration.rs b/docs/sdk/src/reference_docs/frame_runtime_migration.rs deleted file mode 100644 index 0616ccbb6f57971823c7347a60574ef0c0bef2ab..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/frame_runtime_migration.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! # Runtime Runtime Upgrade and Testing -//! -//! -//! Notes: -//! -//! - Flow of things, when does `on_runtime_upgrade` get called. Link to to `Hooks` and its diagram -//! as source of truth. -//! - Data migration and when it is needed. -//! - Look into the pba-lecture. diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs new file mode 100644 index 0000000000000000000000000000000000000000..b5838b79e5183db329d4871613c5a23227b8e824 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs @@ -0,0 +1,306 @@ +//! # FRAME Runtime Types +//! +//! This reference document briefly explores the idea around types generated at the runtime level by +//! the FRAME macros. +//! +//! > As of now, many of these important types are generated within the internals of +//! > [`construct_runtime`], and there is no easy way for you to visually know they exist. +//! > [#polkadot-sdk#1378](https://github.com/paritytech/polkadot-sdk/pull/1378) is meant to +//! > significantly improve this. Exploring the rust-docs of a runtime, such as [`runtime`] which is +//! > defined in this module is as of now the best way to learn about these types. +//! +//! ## Composite Enums +//! +//! Many types within a FRAME runtime follow the following structure: +//! +//! * Each individual pallet defines a type, for example `Foo`. +//! * At the runtime level, these types are amalgamated into a single type, for example +//! `RuntimeFoo`. +//! +//! As the names suggest, all composite enums in a FRAME runtime start their name with `Runtime`. +//! For example, `RuntimeCall` is a representation of the most high level `Call`-able type in the +//! runtime. +//! +//! 
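+//! As a rough mental model, for a runtime composed of `frame_system` and a hypothetical
+//! `pallet_foo`, the generated `RuntimeCall` can be pictured as the following hand-written
+//! sketch. The real type is generated by [`construct_runtime`] and carries many more derives and
+//! attributes; this is only an approximation of its shape:
+//!
+//! ```ignore
+//! pub enum RuntimeCall {
+//!     // pointer to the `Call` enum of `frame_system`.
+//!     System(frame_system::Call<Runtime>),
+//!     // pointer to the `Call` enum of `pallet_foo`.
+//!     PalletFoo(pallet_foo::Call<Runtime>),
+//! }
+//! ```
+//!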
Composite enums are generally convertible to their individual parts as such: +#![doc = simple_mermaid::mermaid!("../../../mermaid/outer_runtime_types.mmd")] +//! +//! That is, one can always convert from the inner type into the outer type, but not vice versa. This +//! is usually expressed by implementing `From`, `TryFrom`, `From>` and similar traits. +//! +//! ### Example +//! +//! We provide the following two pallets: [`pallet_foo`] and [`pallet_bar`]. Each defines a +//! dispatchable, and `Foo` also defines a custom origin. Lastly, `Bar` defines an additional +//! `GenesisConfig`. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_foo)] +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_bar)] +//! +//! Let's explore how each of these affects the [`RuntimeCall`], [`RuntimeOrigin`] and +//! [`RuntimeGenesisConfig`] generated in [`runtime`], respectively. +//! +//! As observed, [`RuntimeCall`] has 3 variants, one for each pallet and one for `frame_system`. If +//! you explore further, you will soon realize that each variant is merely a pointer to the `Call` +//! type in each pallet, for example [`pallet_foo::Call`]. +//! +//! [`RuntimeOrigin`]'s [`OriginCaller`] has two variants, one for system, and one for `pallet_foo` +//! which utilizes [`frame::pallet_macros::origin`]. +//! +//! Finally, [`RuntimeGenesisConfig`] is composed of `frame_system` and a variant for `pallet_bar`'s +//! [`pallet_bar::GenesisConfig`]. +//! +//! You can find other composite enums by scanning [`runtime`] for other types whose name starts +//! with `Runtime`. Some of the more noteworthy ones are: +//! +//! - [`RuntimeEvent`] +//! - [`RuntimeError`] +//! - [`RuntimeHoldReason`] +//! +//! ### Adding Further Constraints to Runtime Composite Enums +//! +//! This section explores a common scenario where a pallet has access to one of these runtime +//! composite enums, but it wishes to further specify it by adding more trait bounds to it. +//! +//! Let's take the example of `RuntimeCall`. This is an associated type in +//! [`frame_system::Config::RuntimeCall`], and all pallets have access to this type, because they +//! have access to [`frame_system::Config`]. Finally, this type is meant to be set to the outer call of +//! the entire runtime. +//! +//! But, let's not forget that this is information that *we know*, and the Rust compiler does not. +//! All that the Rust compiler knows about this type is *ONLY* what the trait bounds of +//! [`frame_system::Config::RuntimeCall`] are specifying: +#![doc = docify::embed!("../../substrate/frame/system/src/lib.rs", system_runtime_call)] +//! +//! So, when one accesses `::RuntimeCall` within a given pallet, the type is +//! extremely opaque from the perspective of the Rust compiler. +//! +//! How can a pallet access the `RuntimeCall` type with further constraints? For example, each +//! pallet has its own `enum Call`, and knows that its local `Call` is a part of `RuntimeCall`, +//! therefore there should be an `impl From> for RuntimeCall`. +//! +//! The only way to express this using Rust's associated types is for the pallet to **define its own +//! associated type `RuntimeCall`, and further specify what it thinks `RuntimeCall` should be**. +//! +//! In this case, we will want to assert the existence of [`frame::traits::IsSubType`], which is +//! very similar to [`TryFrom`]. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call)] +//! +//! 
And indeed, at the runtime level, this associated type would be the same `RuntimeCall` that is +//! passed to `frame_system`. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_with_specific_runtime_call_impl)] +//! +//! > In other words, the degree of specificity that [`frame_system::Config::RuntimeCall`] has is +//! > not enough for the pallet to work with. Therefore, the pallet has to define its own associated +//! > type representing `RuntimeCall`. +//! +//! Another way to look at this is: +//! +//! `pallet_with_specific_runtime_call::Config::RuntimeCall` and `frame_system::Config::RuntimeCall` +//! are two different representations of the same concrete type that is only known when the runtime +//! is being constructed. +//! +//! Now, within this pallet, this new `RuntimeCall` can be used, and it can use its new trait +//! bounds, such as being [`frame::traits::IsSubType`]: +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call_usages)] +//! +//! ### Asserting Equality of Multiple Runtime Composite Enums +//! +//! Recall that in the above example, `::RuntimeCall` and `::RuntimeCall` are expected to be equal types, but at the compile-time we +//! have to represent them with two different associated types with different bounds. Would it not +//! be cool if we had a test to make sure they actually resolve to the same concrete type once the +//! runtime is constructed? The following snippet exactly does that: +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", assert_equality)] +//! +//! We leave it to the reader to further explore what [`frame::traits::Hooks::integrity_test`] is, +//! and what [`core::any::TypeId`] is. Another way to assert this is using +//! [`frame::traits::IsType`]. +//! +//! ## Type Aliases +//! +//! A number of type aliases are generated by the `construct_runtime` which are also noteworthy: +//! +//! * [`runtime::PalletFoo`] is an alias to [`pallet_foo::Pallet`]. Same for `PalletBar`, and +//! `System` +//! * [`runtime::AllPalletsWithSystem`] is an alias for a tuple of all of the above. This type is +//! important to FRAME internals such as `executive`, as it implements traits such as +//! [`frame::traits::Hooks`]. +//! +//! ## Further Details +//! +//! * [`crate::reference_docs::frame_origin`] explores further details about the usage of +//! `RuntimeOrigin`. +//! * [`RuntimeCall`] is a particularly interesting composite enum as it dictates the encoding of an +//! extrinsic. See [`crate::reference_docs::signed_extensions`] for more information. +//! * See the documentation of [`construct_runtime`]. +//! * See the corresponding lecture in the [pba-book](https://polkadot-blockchain-academy.github.io/pba-book/frame/outer-enum/page.html). +//! +//! +//! [`construct_runtime`]: frame::runtime::prelude::construct_runtime +//! [`runtime::PalletFoo`]: crate::reference_docs::frame_runtime_types::runtime::PalletFoo +//! [`runtime::AllPalletsWithSystem`]: crate::reference_docs::frame_runtime_types::runtime::AllPalletsWithSystem +//! [`runtime`]: crate::reference_docs::frame_runtime_types::runtime +//! [`pallet_foo`]: crate::reference_docs::frame_runtime_types::pallet_foo +//! [`pallet_foo::Call`]: crate::reference_docs::frame_runtime_types::pallet_foo::Call +//! [`pallet_foo::Pallet`]: crate::reference_docs::frame_runtime_types::pallet_foo::Pallet +//! [`pallet_bar`]: crate::reference_docs::frame_runtime_types::pallet_bar +//! 
[`pallet_bar::GenesisConfig`]: crate::reference_docs::frame_runtime_types::pallet_bar::GenesisConfig +//! [`RuntimeEvent`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeEvent +//! [`RuntimeGenesisConfig`]: +//! crate::reference_docs::frame_runtime_types::runtime::RuntimeGenesisConfig +//! [`RuntimeOrigin`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeOrigin +//! [`OriginCaller`]: crate::reference_docs::frame_runtime_types::runtime::OriginCaller +//! [`RuntimeError`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeError +//! [`RuntimeCall`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeCall +//! [`RuntimeHoldReason`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeHoldReason + +use frame::prelude::*; + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet_foo { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub enum Origin { + A, + B, + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet { + pub fn foo(_origin: OriginFor) -> DispatchResult { + todo!(); + } + + pub fn other(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } +} + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet_bar { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::genesis_config] + #[derive(DefaultNoBound)] + pub struct GenesisConfig { + pub initial_account: Option, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::call] + impl Pallet { + pub fn bar(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } +} + +pub mod runtime { + use super::{pallet_bar, pallet_foo}; + use frame::{runtime::prelude::*, testing_prelude::*}; + + #[docify::export(runtime_exp)] + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletFoo: pallet_foo, + PalletBar: pallet_bar, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_foo::Config for Runtime {} + impl pallet_bar::Config for Runtime {} +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_specific_runtime_call { + use super::*; + use frame::traits::IsSubType; + + #[docify::export(custom_runtime_call)] + /// A pallet that wants to further narrow down what `RuntimeCall` is. + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeCall: IsSubType>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + // note that this pallet needs some `call` to have a `enum Call`. + #[pallet::call] + impl Pallet { + pub fn foo(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } + + #[docify::export(custom_runtime_call_usages)] + impl Pallet { + fn _do_something_useful_with_runtime_call(call: ::RuntimeCall) { + // check if the runtime call given is of this pallet's variant. 
+ let _maybe_my_call: Option<&Call> = call.is_sub_type(); + todo!(); + } + } + + #[docify::export(assert_equality)] + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + use core::any::TypeId; + assert_eq!( + TypeId::of::<::RuntimeCall>(), + TypeId::of::<::RuntimeCall>() + ); + } + } +} + +pub mod runtime_with_specific_runtime_call { + use super::pallet_with_specific_runtime_call; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithSpecificRuntimeCall: pallet_with_specific_runtime_call, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + #[docify::export(pallet_with_specific_runtime_call_impl)] + impl pallet_with_specific_runtime_call::Config for Runtime { + // an implementation of `IsSubType` is provided by `construct_runtime`. + type RuntimeCall = RuntimeCall; + } +} diff --git a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d870b432218e9e9a06c25f4f19e1f07ca72c6c6 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs @@ -0,0 +1,138 @@ +//! # Runtime Upgrades +//! +//! At their core, blockchain logic consists of +//! +//! 1. on-chain state and +//! 2. a state transition function +//! +//! In Substrate-based blockchains, state transition functions are referred to as +//! [runtimes](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/blockchain_state_machines/index.html). +//! +//! Traditionally, before Substrate, upgrading state transition functions required node +//! operators to download new software and restart their nodes in a process called +//! [forking](https://en.wikipedia.org/wiki/Fork_(blockchain)). +//! +//! Substrate-based blockchains do not require forking, and instead upgrade runtimes +//! in a process called "Runtime Upgrades". +//! +//! Forkless runtime upgrades are a defining feature of the Substrate framework. Updating the +//! runtime logic without forking the code base enables your blockchain to seamlessly evolve +//! over time in a deterministic, rules-based manner. It also removes ambiguity for node operators +//! and other participants in the network about what the canonical runtime is. +//! +//! This capability is possible due to the runtime of a blockchain existing in on-chain storage. +//! +//! ## Performing a Runtime Upgrade +//! +//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necessary permissions +//! (usually via governance) changes the `:code` storage. Usually, this is performed via a call to +//! [`set_code`] (or [`set_code_without_checks`]) with the desired new runtime blob, scheduled +//! using [`pallet_scheduler`]. +//! +//! Prior to building the new runtime, don't forget to update the +//! [`RuntimeVersion`](sp_version::RuntimeVersion). +//! +//! # Migrations +//! +//! It is often desirable to define logic to execute immediately after runtime upgrades (see +//! [this diagram](frame::traits::Hooks)). +//! +//! Self-contained pieces of logic that execute after a runtime upgrade are called "Migrations". +//! +//! The typical use case of a migration is to 'migrate' pallet storage from one layout to another, +//! for example when the encoding of a storage item is changed. 
However, they can also execute +//! arbitrary logic such as: +//! +//! - Calling arbitrary pallet methods +//! - Mutating arbitrary on-chain state +//! - Cleaning up some old storage items that are no longer needed +//! +//! ## Single Block Migrations +//! +//! - Execute immediately and entirely at the beginning of the block following +//! a runtime upgrade. +//! - Are suitable for migrations which are guaranteed to not exceed the block weight. +//! - Are simply implementations of [`OnRuntimeUpgrade`]. +//! +//! To learn best practices for writing single block pallet storage migrations, see the +//! [Single Block Migration Example Pallet](pallet_example_single_block_migrations). +//! +//! ### Scheduling the Single Block Migrations to Run Next Runtime Upgrade +//! +//! Schedule migrations to run on the next runtime upgrade by passing them as a generic parameter to your +//! [`Executive`](frame_executive) pallet: +//! +//! ```ignore +//! /// Tuple of migrations (structs that implement `OnRuntimeUpgrade`) +//! type Migrations = ( +//! pallet_example_storage_migration::migrations::v1::versioned::MigrateV0ToV1, +//! MyCustomMigration, +//! // ...more migrations here +//! ); +//! pub type Executive = frame_executive::Executive< +//! Runtime, +//! Block, +//! frame_system::ChainContext, +//! Runtime, +//! AllPalletsWithSystem, +//! Migrations, // <-- pass your migrations to Executive here +//! >; +//! ``` +//! +//! ### Ensuring Single Block Migration Safety +//! +//! "My migration unit tests pass, so it should be safe to deploy, right?" +//! +//! No! Unit tests execute the migration in a very simple test environment, and cannot account +//! for the complexities of a real runtime or real on-chain state. +//! +//! Prior to deploying migrations, it is critical to perform additional checks to ensure that when +//! run in your real runtime they will not brick the chain due to: +//! - Panicking +//! - Touching too many storage keys and resulting in an excessively large PoV +//! - Taking too long to execute +//! +//! [`try-runtime-cli`](https://github.com/paritytech/try-runtime-cli) has a sub-command +//! [`on-runtime-upgrade`](https://paritytech.github.io/try-runtime-cli/try_runtime_core/commands/enum.Action.html#variant.OnRuntimeUpgrade) +//! which is designed to help with exactly this. +//! +//! Developers MUST run this command before deploying migrations to ensure they will not +//! inadvertently result in a bricked chain. +//! +//! It is recommended to run it as part of your CI pipeline. See the +//! [polkadot-sdk check-runtime-migration job](https://github.com/paritytech/polkadot-sdk/blob/4a293bc5a25be637c06ce950a34490706597615b/.gitlab/pipeline/check.yml#L103-L124) +//! for an example of how to configure this. +//! +//! ### Note on the Manipulability of PoV Size and Execution Time +//! +//! While [`try-runtime-cli`](https://github.com/paritytech/try-runtime-cli) can help ensure with +//! very high certainty that a migration will succeed given **existing** on-chain state, it cannot +//! prevent a malicious actor from manipulating state in a way that will cause the migration to take +//! longer or produce a PoV much larger than previously measured. +//! +//! Therefore, it is important to write migrations in such a way that the execution time or PoV size +//! it adds to the block cannot be easily manipulated, e.g., do not iterate over storage that can +//! quickly or cheaply be bloated. +//! +//! If writing your migration in such a way is not possible, a multi block migration should be used +//! instead. +//! +//! 
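+//! As a rough illustration of the shape of a single block migration, consider the following
+//! sketch. `pallet_my_pallet` and its `Value` storage item are made-up names, and the sketch
+//! assumes `Value` was widened from `u32` to `u64`; a real migration should additionally be
+//! guarded by a [`StorageVersion`] check, e.g. via
+//! [`VersionedMigration`](frame_support::migrations::VersionedMigration):
+//!
+//! ```ignore
+//! pub struct MyCustomMigration<T>(core::marker::PhantomData<T>);
+//!
+//! impl<T: pallet_my_pallet::Config> OnRuntimeUpgrade for MyCustomMigration<T> {
+//!     fn on_runtime_upgrade() -> Weight {
+//!         // re-decode the old `u32` value and write it back in the new `u64` encoding.
+//!         let _ = pallet_my_pallet::Value::<T>::translate::<u32, _>(|old| old.map(u64::from));
+//!         // account for the one read and one write performed above.
+//!         T::DbWeight::get().reads_writes(1, 1)
+//!     }
+//! }
+//! ```
+//!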
### Other useful tools +//! +//! [`Chopsticks`](https://github.com/AcalaNetwork/chopsticks) is another tool in the Substrate +//! ecosystem which developers may find useful to use in addition to `try-runtime-cli` when testing +//! their single block migrations. +//! +//! ## Multi Block Migrations +//! +//! Safely and easily execute long-running migrations across multiple blocks. +//! +//! Suitable for migrations which could use arbitrary amounts of block weight. +//! +//! TODO: Link to multi block migration example/s once PR is merged (). +//! +//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion +//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade +//! [`StorageVersion`]: frame_support::traits::StorageVersion +//! [`set_code`]: frame_system::Call::set_code +//! [`set_code_without_checks`]: frame_system::Call::set_code_without_checks diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index c16122ee4287b17614f5d61acc9f26df0429c2cd..de0b012bb1268537a0caa7f1f027f53fd887f6c0 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -43,16 +43,16 @@ pub mod extrinsic_encoding; // TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 pub mod signed_extensions; -/// Learn about *"Origin"* A topic in FRAME that enables complex account abstractions to be built. -// TODO: @shawntabrizi https://github.com/paritytech/polkadot-sdk-docs/issues/43 +/// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; /// Learn about how to write safe and defensive code in your FRAME runtime. // TODO: @CrackTheCode016 https://github.com/paritytech/polkadot-sdk-docs/issues/44 pub mod safe_defensive_programming; -/// Learn about composite enums in FRAME-based runtimes, such as "RuntimeEvent" and "RuntimeCall". -pub mod frame_composite_enums; +/// Learn about composite enums and other runtime level types, such as "RuntimeEvent" and +/// "RuntimeCall". +pub mod frame_runtime_types; /// Learn about how to make a pallet/runtime that is fee-less and instead uses another mechanism to /// control usage and sybil attacks. @@ -92,11 +92,18 @@ pub mod cli; // TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54 pub mod consensus_swapping; -/// Learn about all the advance ways to test your coordinate a rutnime upgrade and data migration. -// TODO: @liamaharon https://github.com/paritytech/polkadot-sdk-docs/issues/55 -pub mod frame_runtime_migration; +/// Learn about Runtime Upgrades and best practices for writing Migrations. +pub mod frame_runtime_upgrades_and_migrations; /// Learn about light nodes, how they function, and how Substrate-based chains come /// light-node-first out of the box. // TODO: @jsdw @josepot https://github.com/paritytech/polkadot-sdk-docs/issues/68 pub mod light_nodes; + +/// Learn about the offchain workers, how they function, and how to use them, as provided by the +/// [`frame`] APIs. +pub mod frame_offchain_workers; + +/// Learn about the different ways through which multiple [`frame`] pallets can be combined to work +/// together. 
+pub mod frame_pallet_coupling; diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index ee2673ac20ff3e5b04b0ecfc144850acf07d165f..b9232f95981bf66427ffc67cc98282efb8cf330b 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -21,7 +21,7 @@ crate-type = ["cdylib", "rlib"] cfg-if = "1.0" clap = { version = "4.5.1", features = ["derive"], optional = true } log = { workspace = true, default-features = true } -thiserror = "1.0.48" +thiserror = { workspace = true } futures = "0.3.21" pyro = { package = "pyroscope", version = "0.5.3", optional = true } pyroscope_pprofrs = { version = "0.2", optional = true } diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index 30f35ebcb6ffa95f0f2384821e168353d95df94b..e1bc2309a942a8255dd5c99a0db0725aa3b1b37d 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -52,7 +52,7 @@ pub enum Subcommand { /// Try-runtime has migrated to a standalone CLI /// (). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after Janurary 2024. + /// deprecation notice. It will be removed entirely some time after January 2024. TryRuntime, /// Key management CLI utilities diff --git a/polkadot/doc/testing.md b/polkadot/doc/testing.md deleted file mode 100644 index 76703b1b4398a0cac8423183a5d2febfabab0e6c..0000000000000000000000000000000000000000 --- a/polkadot/doc/testing.md +++ /dev/null @@ -1,302 +0,0 @@ -# Testing - -Testing is an essential tool to assure correctness. This document describes how we test the Polkadot code, whether -locally, at scale, and/or automatically in CI. - -## Scopes - -The testing strategy for Polkadot is 4-fold: - -### Unit testing (1) - -Boring, small scale correctness tests of individual functions. It is usually -enough to run `cargo test` in the crate you are testing. - -For full coverage you may have to pass some additional features. For example: - -```sh -cargo test --features ci-only-tests -``` - -### Integration tests - -There are the following variants of integration tests: - -#### Subsystem tests (2) - -One particular subsystem (subsystem under test) interacts with a mocked overseer that is made to assert incoming and -outgoing messages of the subsystem under test. See e.g. the `statement-distribution` tests. - -#### Behavior tests (3) - -Launching small scale networks, with multiple adversarial nodes. This should include tests around the thresholds in -order to evaluate the error handling once certain assumed invariants fail. - -Currently, we commonly use **zombienet** to run mini test-networks, whether locally or in CI. To run on your machine: - -- First, make sure you have [zombienet][zombienet] installed. - -- Now, all the required binaries must be installed in your $PATH. You must run the following from the `polkadot/` -directory in order to test your changes. (Not `zombienet setup`, or you will get the released binaries without your -local changes!) - -```sh -cargo install --path . --locked -``` - -- You will also need to install whatever binaries are required for your specific tests. 
For example, to install -`undying-collator`, from `polkadot/`, run: - -```sh -cargo install --path ./parachain/test-parachains/undying/collator --locked -``` - -- Finally, run the zombienet test from the `polkadot` directory: - -```sh -RUST_LOG=parachain::pvf=trace zombienet --provider=native spawn zombienet_tests/functional/0001-parachains-pvf.toml -``` - -- You can pick a validator node like `alice` from the output and view its logs -(`tail -f `) or metrics. Make sure there is nothing funny in the logs -(try `grep WARN `). - -#### Testing at scale (4) - -Launching many nodes with configurable network speed and node features in a cluster of nodes. At this scale the -[Simnet][simnet] comes into play which launches a full cluster of nodes. The scale is handled by spawning a kubernetes -cluster and the meta description is covered by [Gurke][Gurke]. Asserts are made using Grafana rules, based on the -existing prometheus metrics. This can be extended by adding an additional service translating `jaeger` spans into -addition prometheus avoiding additional Polkadot source changes. - -_Behavior tests_ and _testing at scale_ have naturally soft boundary. The most significant difference is the presence of -a real network and the number of nodes, since a single host often not capable to run multiple nodes at once. - -## Observing Logs - -To verify expected behavior it's often useful to observe logs. To avoid too many -logs at once, you can run one test at a time: - -1. Add `sp_tracing::try_init_simple();` to the beginning of a test -2. Specify `RUST_LOG=::=trace` before the cargo command. - -For example: - -```sh -RUST_LOG=parachain::pvf=trace cargo test execute_can_run_serially -``` - -For more info on how our logs work, check [the docs][logs]. - -## Coverage - -Coverage gives a _hint_ of the actually covered source lines by tests and test applications. - -The state of the art is currently tarpaulin which unfortunately yields a lot of false negatives. Lines that -are in fact covered, marked as uncovered due to a mere linebreak in a statement can cause these artifacts. This leads to -lower coverage percentages than there actually is. - -Since late 2020 rust has gained [MIR based coverage tooling]( -https://blog.rust-lang.org/inside-rust/2020/11/12/source-based-code-coverage.html). - -```sh -# setup -rustup component add llvm-tools-preview -cargo install grcov miniserve - -export CARGO_INCREMENTAL=0 -# wasm is not happy with the instrumentation -export SKIP_BUILD_WASM=true -export BUILD_DUMMY_WASM_BINARY=true -# the actully collected coverage data -export LLVM_PROFILE_FILE="llvmcoveragedata-%p-%m.profraw" -# build wasm without instrumentation -export WASM_TARGET_DIRECTORY=/tmp/wasm -cargo +nightly build -# required rust flags -export RUSTFLAGS="-Zinstrument-coverage" -# assure target dir is clean -rm -r target/{debug,tests} -# run tests to get coverage data -cargo +nightly test --all - -# create the *html* report out of all the test binaries -# mostly useful for local inspection -grcov . --binary-path ./target/debug -s . -t html --branch --ignore-not-existing -o ./coverage/ -miniserve -r ./coverage - -# create a *codecov* compatible report -grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o lcov.info -``` - -The test coverage in `lcov` can the be published to . - -```sh -bash <(curl -s https://codecov.io/bash) -f lcov.info -``` - -or just printed as part of the PR using a github action i.e. 
-[`jest-lcov-reporter`](https://github.com/marketplace/actions/jest-lcov-reporter). - -For full examples on how to use [`grcov` /w Polkadot specifics see the github -repo](https://github.com/mozilla/grcov#coverallscodecov-output). - -## Fuzzing - -Fuzzing is an approach to verify correctness against arbitrary or partially structured inputs. - -Currently implemented fuzzing targets: - -- `erasure-coding` - -The tooling of choice here is `honggfuzz-rs` as it allows _fastest_ coverage according to "some paper" which is a -positive feature when run as part of PRs. - -Fuzzing is generally not applicable for data secured by cryptographic hashes or signatures. Either the input has to be -specifically crafted, such that the discarded input percentage stays in an acceptable range. System level fuzzing is -hence simply not feasible due to the amount of state that is required. - -Other candidates to implement fuzzing are: - -- `rpc` -- ... - -## Performance metrics - -There are various ways of performance metrics. - -- timing with `criterion` -- cache hits/misses w/ `iai` harness or `criterion-perf` -- `coz` a performance based compiler - -Most of them are standard tools to aid in the creation of statistical tests regarding change in time of certain unit -tests. - -`coz` is meant for runtime. In our case, the system is far too large to yield a sufficient number of measurements in -finite time. An alternative approach could be to record incoming package streams per subsystem and store dumps of them, -which in return could be replayed repeatedly at an accelerated speed, with which enough metrics could be obtained to -yield information on which areas would improve the metrics. This unfortunately will not yield much information, since -most if not all of the subsystem code is linear based on the input to generate one or multiple output messages, it is -unlikely to get any useful metrics without mocking a sufficiently large part of the other subsystem which overlaps with -[#Integration tests] which is unfortunately not repeatable as of now. As such the effort gain seems low and this is not -pursued at the current time. - -## Writing small scope integration tests with preconfigured workers - -Requirements: - -- spawn nodes with preconfigured behaviors -- allow multiple types of configuration to be specified -- allow extendability via external crates -- ... - ---- - -## Implementation of different behavior strain nodes - -### Goals - -The main goals are is to allow creating a test node which exhibits a certain behavior by utilizing a subset of _wrapped_ -or _replaced_ subsystems easily. The runtime must not matter at all for these tests and should be simplistic. The -execution must be fast, this mostly means to assure a close to zero network latency as well as shorting the block time -and epoch times down to a few `100ms` and a few dozend blocks per epoch. - -### Approach - -#### MVP - -A simple small scale builder pattern would suffice for stage one implementation of allowing to replace individual -subsystems. An alternative would be to harness the existing `AllSubsystems` type and replace the subsystems as needed. - -#### Full `proc-macro` implementation - -`Overseer` is a common pattern. It could be extracted as `proc` macro and generative `proc-macro`. This would replace -the `AllSubsystems` type as well as implicitly create the `AllMessages` enum as `AllSubsystemsGen` does today. 
- -The implementation is yet to be completed, see the [implementation PR](https://github.com/paritytech/polkadot/pull/2962) -for details. - -##### Declare an overseer implementation - -```rust -struct BehaveMaleficient; - -impl OverseerGen for BehaveMaleficient { - fn generate<'a, Spawner, RuntimeClient>( - &self, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, - ) -> Result<(Overseer>, OverseerHandler), Error> - where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, - Spawner: 'static + overseer::gen::Spawner + Clone + Unpin, - { - let spawner = args.spawner.clone(); - let leaves = args.leaves.clone(); - let runtime_client = args.runtime_client.clone(); - let registry = args.registry.clone(); - let candidate_validation_config = args.candidate_validation_config.clone(); - // modify the subsystem(s) as needed: - let all_subsystems = create_default_subsystems(args)?. - // or spawn an entirely new set - - replace_candidate_validation( - // create the filtered subsystem - FilteredSubsystem::new( - CandidateValidationSubsystem::with_config( - candidate_validation_config, - Metrics::register(registry)?, - ), - // an implementation of - Skippy::default(), - ), - ); - - Overseer::new(leaves, all_subsystems, registry, runtime_client, spawner) - .map_err(|e| e.into()) - - // A builder pattern will simplify this further - // WIP https://github.com/paritytech/polkadot/pull/2962 - } -} - -fn main() -> eyre::Result<()> { - color_eyre::install()?; - let cli = Cli::from_args(); - assert_matches::assert_matches!(cli.subcommand, None); - polkadot_cli::run_node(cli, BehaveMaleficient)?; - Ok(()) -} -``` - -[`variant-a`](../node/malus/src/variant-a.rs) is a fully working example. - -#### Simnet - -Spawn a kubernetes cluster based on a meta description using [Gurke] with the [Simnet] scripts. - -Coordinated attacks of multiple nodes or subsystems must be made possible via a side-channel, that is out of scope for -this document. - -The individual node configurations are done as targets with a particular builder configuration. - -#### Behavior tests w/o Simnet - -Commonly this will require multiple nodes, and most machines are limited to running two or three nodes concurrently. -Hence, this is not the common case and is just an implementation _idea_. 
- -```rust -behavior_testcase!{ -"TestRuntime" => -"Alice": , -"Bob": , -"Charles": Default, -"David": "Charles", -"Eve": "Bob", -} -``` - -[zombienet]: https://github.com/paritytech/zombienet -[Gurke]: https://github.com/paritytech/gurke -[simnet]: https://github.com/paritytech/simnet_scripts -[logs]: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/node/gum/src/lib.rs diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index 8705c14c3a4d026aa6dda5cc5232e9fdf10baf82..677f15c4b9a1446c7828c78f92933f7811733ee8 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -16,7 +16,7 @@ novelpoly = { package = "reed-solomon-novelpoly", version = "2.0.0" } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "std"] } sp-core = { path = "../../substrate/primitives/core" } sp-trie = { path = "../../substrate/primitives/trie" } -thiserror = "1.0.48" +thiserror = { workspace = true } [dev-dependencies] criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] } diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index 8c6644ad6da59f51c2eb0ebb583fed0cd211d821..8df0c2b1edae47cad98881e7243eb1dc68449e9e 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -19,7 +19,7 @@ polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-primitives = { path = "../../primitives" } sp-core = { path = "../../../substrate/primitives/core" } sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } -thiserror = "1.0.48" +thiserror = { workspace = true } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } [dev-dependencies] diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 1010779a1b3650042f789bd13cc68e531b382d15..2a5b6198b9a827788696bdf5e1eeeb5829a9da6d 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -20,7 +20,7 @@ merlin = "3.0" schnorrkel = "0.11.4" kvdb = "0.13.0" derive_more = "0.99.17" -thiserror = "1.0.48" +thiserror = { workspace = true } itertools = "0.10.5" polkadot-node-subsystem = { path = "../../subsystem" } @@ -35,7 +35,7 @@ sp-consensus = { path = "../../../../substrate/primitives/consensus/common", def sp-consensus-slots = { path = "../../../../substrate/primitives/consensus/slots", default-features = false } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto", default-features = false, features = ["full_crypto"] } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -# should match schnorrkel +# rand_core should match schnorrkel rand_core = "0.6.2" rand_chacha = { version = "0.3.1" } rand = "0.8.5" diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 456ae319787b07740bb0817dcfbe260500eb9dd8..8cc16a6e1ec199776003204e8d9c870fb0f62114 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -1253,13 +1253,20 @@ async fn handle_actions( Action::BecomeActive => { *mode = Mode::Active; - let messages = distribution_messages_for_activation( + let (messages, next_actions) = distribution_messages_for_activation( + ctx, overlayed_db, 
state, delayed_approvals_timers, - )?; + session_info_provider, + ) + .await?; ctx.send_messages(messages.into_iter()).await; + let next_actions: Vec<Action> = + next_actions.into_iter().map(|v| v.clone()).chain(actions_iter).collect(); + + actions_iter = next_actions.into_iter(); }, Action::Conclude => { conclude = true; @@ -1313,15 +1320,19 @@ fn get_assignment_core_indices( } } -fn distribution_messages_for_activation( +#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] +async fn distribution_messages_for_activation( + ctx: &mut Context, db: &OverlayedBackend<'_, impl Backend>, state: &State, delayed_approvals_timers: &mut DelayedApprovalTimer, -) -> SubsystemResult<Vec<ApprovalDistributionMessage>> { + session_info_provider: &mut RuntimeInfo, +) -> SubsystemResult<(Vec<ApprovalDistributionMessage>, Vec<Action>)> { let all_blocks: Vec<Hash> = db.load_all_blocks()?; let mut approval_meta = Vec::with_capacity(all_blocks.len()); let mut messages = Vec::new(); + let mut actions = Vec::new(); messages.push(ApprovalDistributionMessage::NewBlocks(Vec::new())); // dummy value. @@ -1396,16 +1407,60 @@ fn distribution_messages_for_activation( &claimed_core_indices, &block_entry, ) { - Ok(bitfield) => messages.push( - ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - bitfield, - ), - ), + Ok(bitfield) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_entry.candidate_receipt().hash(), + ?block_hash, + "Discovered, triggered assignment, not approved yet", + ); + + let indirect_cert = IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }; + messages.push( + ApprovalDistributionMessage::DistributeAssignment( + indirect_cert.clone(), + bitfield.clone(), + ), + ); + + if !block_entry + .candidate_is_pending_signature(*candidate_hash) + { + let ExtendedSessionInfo { ref executor_params, .. } = + match get_extended_session_info( + session_info_provider, + ctx.sender(), + block_entry.block_hash(), + block_entry.session(), + ) + .await + { + Some(i) => i, + None => continue, + }; + + actions.push(Action::LaunchApproval { + claimed_candidate_indices: bitfield, + candidate_hash: candidate_entry + .candidate_receipt() + .hash(), + indirect_cert, + assignment_tranche: assignment.tranche(), + relay_block_hash: block_hash, + session: block_entry.session(), + executor_params: executor_params.clone(), + candidate: candidate_entry + .candidate_receipt() + .clone(), + backing_group: approval_entry.backing_group(), + distribute_assignment: false, + }); + } + }, Err(err) => { // Should never happen. If we fail here it means the // assignment is null (no cores claimed). @@ -1496,7 +1551,7 @@ fn distribution_messages_for_activation( } messages[0] = ApprovalDistributionMessage::NewBlocks(approval_meta); - Ok(messages) + Ok((messages, actions)) } // Handle an incoming signal from the overseer. Returns true if execution should conclude. diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index ef47bdb2213a153dc7223c1018ba8ec9b341a5aa..b924a1b52ccf49a242adecfd534b498004ec3d87 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -588,6 +588,13 @@ impl BlockEntry { !self.candidates_pending_signature.is_empty() } + /// Returns true if the candidate hash is in the queue for a signature.
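+ /// + /// Used on restart to avoid re-launching approval work for candidates whose approval is + /// already pending as part of a (possibly coalesced) signature.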
+ pub fn candidate_is_pending_signature(&self, candidate_hash: CandidateHash) -> bool { + self.candidates_pending_signature + .values() + .any(|context| context.candidate_hash == candidate_hash) + } + /// Candidate hashes for candidates pending signatures fn candidate_hashes_pending_signature(&self) -> Vec<CandidateHash> { self.candidates_pending_signature diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 9220e84a2554a48c14468567e87e74319ce94151..c9053232a4c8752f03134047dbc64b020377740a 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -78,6 +78,7 @@ struct TestSyncOracle { struct TestSyncOracleHandle { done_syncing_receiver: oneshot::Receiver<()>, + is_major_syncing: Arc<AtomicBool>, } impl TestSyncOracleHandle { @@ -108,8 +109,9 @@ impl SyncOracle for TestSyncOracle { fn make_sync_oracle(val: bool) -> (Box<dyn SyncOracle + Send>, TestSyncOracleHandle) { let (tx, rx) = oneshot::channel(); let flag = Arc::new(AtomicBool::new(val)); - let oracle = TestSyncOracle { flag, done_syncing_sender: Arc::new(Mutex::new(Some(tx))) }; - let handle = TestSyncOracleHandle { done_syncing_receiver: rx }; + let oracle = + TestSyncOracle { flag: flag.clone(), done_syncing_sender: Arc::new(Mutex::new(Some(tx))) }; + let handle = TestSyncOracleHandle { done_syncing_receiver: rx, is_major_syncing: flag }; (Box::new(oracle), handle) } @@ -465,6 +467,7 @@ struct HarnessConfigBuilder { clock: Option<MockClock>, backend: Option<TestStore>, assignment_criteria: Option<Box<dyn AssignmentCriteria + Send + Sync>>, + major_syncing: bool, } impl HarnessConfigBuilder { @@ -476,9 +479,19 @@ impl HarnessConfigBuilder { self } + pub fn major_syncing(&mut self, value: bool) -> &mut Self { + self.major_syncing = value; + self + } + + pub fn backend(&mut self, store: TestStore) -> &mut Self { + self.backend = Some(store); + self + } + pub fn build(&mut self) -> HarnessConfig { let (sync_oracle, sync_oracle_handle) = - self.sync_oracle.take().unwrap_or_else(|| make_sync_oracle(false)); + self.sync_oracle.take().unwrap_or_else(|| make_sync_oracle(self.major_syncing)); let assignment_criteria = self .assignment_criteria @@ -736,11 +749,13 @@ struct BlockConfig { slot: Slot, candidates: Option<Vec<(CandidateReceipt, CoreIndex, GroupIndex)>>, session_info: Option<SessionInfo>, + end_syncing: bool, } struct ChainBuilder { blocks_by_hash: HashMap<Hash, (BlockConfig, Header)>, blocks_at_height: BTreeMap<u32, Vec<Hash>>, + is_major_syncing: Arc<AtomicBool>, } impl ChainBuilder { @@ -748,16 +763,28 @@ impl ChainBuilder { const GENESIS_PARENT_HASH: Hash = Hash::repeat_byte(0x00); pub fn new() -> Self { - let mut builder = - Self { blocks_by_hash: HashMap::new(), blocks_at_height: BTreeMap::new() }; + let mut builder = Self { + blocks_by_hash: HashMap::new(), + blocks_at_height: BTreeMap::new(), + is_major_syncing: Arc::new(AtomicBool::new(false)), + }; builder.add_block_inner( Self::GENESIS_HASH, Self::GENESIS_PARENT_HASH, 0, - BlockConfig { slot: Slot::from(0), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(0), + candidates: None, + session_info: None, + end_syncing: false, + }, ); builder } + pub fn major_syncing(&mut self, major_syncing: Arc<AtomicBool>) -> &mut Self { + self.is_major_syncing = major_syncing; + self + } pub fn add_block( &mut self, @@ -808,8 +835,16 @@ impl ChainBuilder { } ancestry.reverse(); - import_block(overseer, ancestry.as_ref(), *number, block_config, false, i > 0) - .await; + import_block( + overseer, + ancestry.as_ref(), + *number, + block_config, + false, + i > 0, + self.is_major_syncing.clone(), + ) + .await; let _: Option<()> =
future::pending().timeout(Duration::from_millis(100)).await; } } @@ -863,6 +898,7 @@ async fn import_block( config: &BlockConfig, gap: bool, fork: bool, + major_syncing: Arc<AtomicBool>, ) { let (new_head, new_header) = &hashes[hashes.len() - 1]; let candidates = config.candidates.clone().unwrap_or(vec![( @@ -891,6 +927,12 @@ async fn import_block( h_tx.send(Ok(Some(new_header.clone()))).unwrap(); } ); + + let is_major_syncing = major_syncing.load(Ordering::SeqCst); + if config.end_syncing { + major_syncing.store(false, Ordering::SeqCst); + } + if !fork { let mut _ancestry_step = 0; if gap { @@ -931,7 +973,7 @@ async fn import_block( } } - if number > 0 { + if number > 0 && !is_major_syncing { assert_matches!( overseer_recv(overseer).await, AllMessages::RuntimeApi( @@ -944,7 +986,6 @@ async fn import_block( c_tx.send(Ok(inclusion_events)).unwrap(); } ); - assert_matches!( overseer_recv(overseer).await, AllMessages::RuntimeApi( @@ -984,14 +1025,14 @@ async fn import_block( ); } - if number == 0 { + if number == 0 && !is_major_syncing { assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks(v)) => { assert_eq!(v.len(), 0usize); } ); - } else { + } else if number > 0 && !is_major_syncing { if !fork { // SessionInfo won't be called for forks - it's already cached assert_matches!( @@ -1031,20 +1072,23 @@ async fn import_block( ); } - assert_matches!( - overseer_recv(overseer).await, - AllMessages::ApprovalDistribution( - ApprovalDistributionMessage::NewBlocks(mut approval_vec) - ) => { - assert_eq!(approval_vec.len(), 1); - let metadata = approval_vec.pop().unwrap(); - let hash = &hashes[number as usize]; - let parent_hash = &hashes[(number - 1) as usize]; - assert_eq!(metadata.hash, hash.0.clone()); - assert_eq!(metadata.parent_hash, parent_hash.0.clone()); - assert_eq!(metadata.slot, config.slot); - } - ); + if !is_major_syncing { + assert_matches!( + overseer_recv(overseer).await, + + AllMessages::ApprovalDistribution( + ApprovalDistributionMessage::NewBlocks(mut approval_vec) + ) => { + assert_eq!(approval_vec.len(), 1); + let metadata = approval_vec.pop().unwrap(); + let hash = &hashes[number as usize]; + let parent_hash = &hashes[(number - 1) as usize]; + assert_eq!(metadata.hash, hash.0.clone()); + assert_eq!(metadata.parent_hash, parent_hash.0.clone()); + assert_eq!(metadata.slot, config.slot); + } + ); + } } } @@ -1072,7 +1116,7 @@ fn subsystem_rejects_bad_assignment_ok_criteria() { block_hash, head, 1, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); builder.build(&mut virtual_overseer).await; @@ -1135,7 +1179,7 @@ fn subsystem_rejects_bad_assignment_err_criteria() { block_hash, head, 1, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); builder.build(&mut virtual_overseer).await; @@ -1240,6 +1284,7 @@ fn subsystem_rejects_approval_if_no_candidate_entry() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(1), GroupIndex(1))]), session_info: None, + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -1345,7 +1390,12 @@ fn subsystem_rejects_approval_before_assignment() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut
virtual_overseer) .await; @@ -1398,7 +1448,12 @@ fn subsystem_rejects_assignment_in_future() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(0), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(0), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1472,6 +1527,7 @@ fn subsystem_accepts_duplicate_assignment() { (candidate_receipt2, CoreIndex(1), GroupIndex(1)), ]), session_info: None, + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -1537,7 +1593,12 @@ fn subsystem_rejects_assignment_with_unknown_candidate() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1582,7 +1643,12 @@ fn subsystem_rejects_oversized_bitfields() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1650,7 +1716,12 @@ fn subsystem_accepts_and_imports_approval_after_assignment() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1741,6 +1812,7 @@ fn subsystem_second_approval_import_only_schedules_wakeups() { slot: Slot::from(0), candidates: None, session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -1824,7 +1896,12 @@ fn subsystem_assignment_import_updates_candidate_entry_and_schedules_wakeup() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1873,7 +1950,12 @@ fn subsystem_process_wakeup_schedules_wakeup() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1925,7 +2007,7 @@ fn linear_import_act_on_leaf() { hash, head, i, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); head = hash; } @@ -1983,7 +2065,7 @@ fn forkful_import_at_same_height_act_on_leaf() { hash, head, i, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); head = hash; } @@ -1997,7 +2079,7 @@ fn forkful_import_at_same_height_act_on_leaf() { hash, head, session, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); } builder.build(&mut virtual_overseer).await; @@ -2168,6 +2250,7 @@ fn import_checked_approval_updates_entries_and_schedules() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut 
virtual_overseer).await; @@ -2323,6 +2406,7 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { (candidate_receipt2, CoreIndex(1), GroupIndex(1)), ]), session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -2445,6 +2529,7 @@ fn approved_ancestor_test( slot: Slot::from(i as u64), candidates: Some(vec![(candidate_receipt, CoreIndex(0), GroupIndex(0))]), session_info: None, + end_syncing: false, }, ); } @@ -2623,13 +2708,19 @@ fn subsystem_validate_approvals_cache() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + end_syncing: false, }, ) .add_block( fork_block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot, candidates, session_info: Some(session_info) }, + BlockConfig { + slot, + candidates, + session_info: Some(session_info), + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -2740,6 +2831,7 @@ fn subsystem_doesnt_distribute_duplicate_compact_assignments() { (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), ]), session_info: None, + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -2997,6 +3089,7 @@ where slot, candidates: Some(vec![(candidate_receipt, CoreIndex(0), GroupIndex(2))]), session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -3314,6 +3407,7 @@ fn pre_covers_dont_stall_approval() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -3491,6 +3585,7 @@ fn waits_until_approving_assignments_are_old_enough() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -3705,6 +3800,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -3943,8 +4039,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { let store = config.backend(); test_harness(config, |test_harness| async move { - let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _sync_oracle_handle } = - test_harness; + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _ } = test_harness; assert_matches!( overseer_recv(&mut virtual_overseer).await, @@ -4006,6 +4101,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -4040,3 +4136,569 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { virtual_overseer }); } + +// Builds a chain with a fork where both relay blocks include the same candidate. 
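+// Returns the `ChainBuilder` together with the `SessionInfo` it used, so that callers can +// consistently answer the runtime API requests the subsystem makes for that session.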
+async fn build_chain_with_two_blocks_with_one_candidate_each( + block_hash1: Hash, + block_hash2: Hash, + slot: Slot, + sync_oracle_handle: TestSyncOracleHandle, + candidate_receipt: CandidateReceipt, +) -> (ChainBuilder, SessionInfo) { + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::<GroupIndex, Vec<ValidatorIndex>>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some(vec![(candidate_receipt.clone(), CoreIndex(0), GroupIndex(0))]); + let mut chain_builder = ChainBuilder::new(); + + chain_builder + .major_syncing(sync_oracle_handle.is_major_syncing.clone()) + .add_block( + block_hash1, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + end_syncing: false, + }, + ) + .add_block( + block_hash2, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates, + session_info: Some(session_info.clone()), + end_syncing: true, + }, + ); + (chain_builder, session_info) +} + +async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + virtual_overseer: &mut VirtualOverseer, + store: TestStore, + clock: &Box<MockClock>, + sync_oracle_handle: TestSyncOracleHandle, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let candidate_hash = candidate_receipt.hash(); + let slot = Slot::from(1); + let (chain_builder, _session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + chain_builder.build(virtual_overseer).await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); +} + +// Tests that, for candidates we have not approved yet but for which we triggered the assignment +// and the approval work, we restart the work to approve them.
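+// The subsystem is driven until the assignments are distributed, then restarted with the same +// database while major syncing; once syncing ends it must relaunch the approval work.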
+#[test] +fn subsystem_relaunches_approval_work_on_restart() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + // Bail early after the assignment has been distributed but before we answer with the mocked + // approval from CandidateValidation. + virtual_overseer + }); + + // Restart the approval voting subsystem with the same database and with major syncing true + // until the last leaf.
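+ // While syncing, the subsystem suppresses the per-block distribution messages; they must all + // be emitted once syncing ends.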
+ let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let slot = Slot::from(1); + clock.inner.lock().set_tick(slot_to_tick(slot + 2)); + let (chain_builder, session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(_, si_tx), + ) + ) => { + si_tx.send(Ok(Some(session_info.clone()))).unwrap(); + } + ); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(_, si_tx), + ) + ) => { + // Make sure all SessionExecutorParams calls are not made for the leaf (but for its relay parent) + si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + + // Once major syncing ends, approval voting should send all the necessary messages for a + // candidate to be approved. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + // Guarantees the approval work has been relaunched. + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + ..
+ }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} + +// Test that cached approvals, i.e. candidates that we approved but for which we did not issue +// the signature yet because we wanted to coalesce them with more candidates, are sent after restart. +#[test] +fn subsystem_sends_pending_approvals_on_approval_restart() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + ..
+ }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + // Configure a big coalesce number, so that the signature is cached instead of being sent to + // approval-distribution. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 6, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 6, + })); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); + + let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + // On restart signatures should be sent to approval-distribution without relaunching the + // approval work. + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let slot = Slot::from(1); + + clock.inner.lock().set_tick(slot_to_tick(slot + 2)); + let (chain_builder, session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(_, si_tx), + ) + ) => { + si_tx.send(Ok(Some(session_info.clone()))).unwrap(); + } + ); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(_, si_tx), + ) + ) => { + // Make sure all SessionExecutorParams calls are not made 
for the leaf (but for its relay parent) + si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 4f5c18a68d6c378c5c82e3c49c6f3d00c149b18b..05212da7479848830a8fb70ee4c38d449e9a6a7e 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -13,7 +13,7 @@ workspace = true futures = "0.3.21" futures-timer = "3.0.2" kvdb = "0.13.0" -thiserror = "1.0.48" +thiserror = { workspace = true } gum = { package = "tracing-gum", path = "../../gum" } bitvec = "1.0.0" diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index 1c69fc441b3284d1790e03f6896b67e71f32aae3..d0c1f9aa4832dfe629e1fb20d3082f27c260a7d5 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -20,8 +20,9 @@ erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } -thiserror = "1.0.48" +thiserror = { workspace = true } fatality = "0.0.6" +schnellru = "0.2.1" [dev-dependencies] sp-core = { path = "../../../../substrate/primitives/core" } @@ -31,5 +32,6 @@ sc-keystore = { path = "../../../../substrate/client/keystore" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } futures = { version = "0.3.21", features = ["thread-pool"] } assert_matches = "1.4.0" +rstest = "0.18.2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 1b00a62510b7c634b3135a56ca6412c0948ef6a0..64955a393962076bd7b202dd31a7d69d46a4d1b9 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -48,6 +48,9 @@ pub enum Error { #[error("Candidate is not found")] CandidateNotFound, + #[error("CoreIndex cannot be determined for a candidate")] + CoreIndexUnavailable, + #[error("Signature is invalid")] InvalidSignature, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 98bbd6232add4d893293fc948c0a99bba8d46bf3..69bf2e956a0f6bb7f3b9f4a19446e5e7d2c125bb 100644 
--- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -77,6 +77,7 @@ use futures::{ stream::FuturesOrdered, FutureExt, SinkExt, StreamExt, TryFutureExt, }; +use schnellru::{ByLength, LruMap}; use error::{Error, FatalResult}; use polkadot_node_primitives::{ @@ -104,10 +105,12 @@ use polkadot_node_subsystem_util::{ Validator, }; use polkadot_primitives::{ + vstaging::{node_features::FeatureIndex, NodeFeatures}, BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, Hash, Id as ParaId, - PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, - ValidatorIndex, ValidatorSignature, ValidityAttestation, + CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, + Hash, Id as ParaId, IndexedVec, PersistedValidationData, PvfExecKind, SessionIndex, + SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, + ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ @@ -118,7 +121,7 @@ use statement_table::{ }, Config as TableConfig, Context as TableContextTrait, Table, }; -use util::vstaging::get_disabled_validators_with_fallback; +use util::{runtime::request_node_features, vstaging::get_disabled_validators_with_fallback}; mod error; @@ -209,7 +212,9 @@ struct PerRelayParentState { /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, /// The `ParaId` assigned to the local validator at this relay parent. - assignment: Option<ParaId>, + assigned_para: Option<ParaId>, + /// The `CoreIndex` assigned to the local validator at this relay parent. + assigned_core: Option<CoreIndex>, /// The candidates that are backed by enough validators in their group, by hash. backed: HashSet<CandidateHash>, /// The table of candidates and statements under this relay-parent. @@ -224,6 +229,15 @@ struct PerRelayParentState { fallbacks: HashMap<CandidateHash, AttestingData>, /// The minimum backing votes threshold. minimum_backing_votes: u32, + /// If true, we're appending extra bits in the BackedCandidate validator indices bitfield, + /// which represent the assigned core index. True if ElasticScalingMVP is enabled. + inject_core_index: bool, + /// The core states for all cores. + cores: Vec<CoreState>, + /// The validator index -> group mapping at this relay parent. + validator_to_group: Arc<IndexedVec<ValidatorIndex, Option<GroupIndex>>>, + /// The associated group rotation information. + group_rotation_info: GroupRotationInfo, } struct PerCandidateState { @@ -275,6 +289,9 @@ struct State { /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap<CandidateHash, PerCandidateState>, + /// Cache the per-session Validator->Group mapping. + validator_to_group_cache: + LruMap<SessionIndex, Arc<IndexedVec<ValidatorIndex, Option<GroupIndex>>>>, /// A cloneable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result.
background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -292,6 +309,7 @@ impl State { per_leaf: HashMap::default(), per_relay_parent: HashMap::default(), per_candidate: HashMap::new(), + validator_to_group_cache: LruMap::new(ByLength::new(2)), background_validation_tx, keystore, } @@ -379,10 +397,10 @@ struct AttestingData { backing: Vec<ValidatorIndex>, } -#[derive(Default)] +#[derive(Default, Debug)] struct TableContext { validator: Option<Validator>, - groups: HashMap<ParaId, Vec<ValidatorIndex>>, + groups: HashMap<CoreIndex, Vec<ValidatorIndex>>, validators: Vec<ValidatorId>, disabled_validators: Vec<ValidatorIndex>, } @@ -404,7 +422,7 @@ impl TableContext { impl TableContextTrait for TableContext { type AuthorityId = ValidatorIndex; type Digest = CandidateHash; - type GroupId = ParaId; + type GroupId = CoreIndex; type Signature = ValidatorSignature; type Candidate = CommittedCandidateReceipt; @@ -412,15 +430,11 @@ impl TableContextTrait for TableContext { candidate.hash() } - fn candidate_group(candidate: &CommittedCandidateReceipt) -> ParaId { - candidate.descriptor().para_id + fn is_member_of(&self, authority: &ValidatorIndex, core: &CoreIndex) -> bool { + self.groups.get(core).map_or(false, |g| g.iter().any(|a| a == authority)) } - fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority)) - } - - fn get_group_size(&self, group: &ParaId) -> Option<usize> { + fn get_group_size(&self, group: &CoreIndex) -> Option<usize> { self.groups.get(group).map(|g| g.len()) } } @@ -442,19 +456,20 @@ fn primitive_statement_to_table(s: &SignedFullStatementWithPVD) -> TableSignedSt fn table_attested_to_backed( attested: TableAttestedCandidate< - ParaId, + CoreIndex, CommittedCandidateReceipt, ValidatorIndex, ValidatorSignature, >, table_context: &TableContext, + inject_core_index: bool, ) -> Option<BackedCandidate> { - let TableAttestedCandidate { candidate, validity_votes, group_id: para_id } = attested; + let TableAttestedCandidate { candidate, validity_votes, group_id: core_index } = attested; let (ids, validity_votes): (Vec<_>, Vec<ValidityAttestation>) = validity_votes.into_iter().map(|(id, vote)| (id, vote.into())).unzip(); - let group = table_context.groups.get(&para_id)?; + let group = table_context.groups.get(&core_index)?; let mut validator_indices = BitVec::with_capacity(group.len()); @@ -479,14 +494,15 @@ fn table_attested_to_backed( } vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); - Some(BackedCandidate { + Some(BackedCandidate::new( candidate, - validity_votes: vote_positions + vote_positions .into_iter() .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) .collect(), validator_indices, - }) + inject_core_index.then_some(core_index), + )) } async fn store_available_data( @@ -971,7 +987,14 @@ async fn handle_active_leaves_update( // construct a `PerRelayParent` from the runtime API // and insert it. - let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?; + let per = construct_per_relay_parent_state( + ctx, + maybe_new, + &state.keystore, + &mut state.validator_to_group_cache, + mode, + ) + .await?; if let Some(per) = per { state.per_relay_parent.insert(maybe_new, per); @@ -981,31 +1004,112 @@ async fn handle_active_leaves_update( Ok(()) } +macro_rules! try_runtime_api { + ($x: expr) => { + match $x { + Ok(x) => x, + Err(err) => { + // Only bubble up fatal errors. + error::log_error(Err(Into::<Error>::into(err).into()))?; + + // We can't do candidate validation work if we don't have the + // requisite runtime API data.
But these errors should not take
+				// down the node.
+				return Ok(None)
+			},
+		}
+	};
+}
+
+fn core_index_from_statement(
+	validator_to_group: &IndexedVec<ValidatorIndex, Option<GroupIndex>>,
+	group_rotation_info: &GroupRotationInfo,
+	cores: &[CoreState],
+	statement: &SignedFullStatementWithPVD,
+) -> Option<CoreIndex> {
+	let compact_statement = statement.as_unchecked();
+	let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash());
+
+	let n_cores = cores.len();
+
+	gum::trace!(
+		target: LOG_TARGET,
+		?group_rotation_info,
+		?statement,
+		?validator_to_group,
+		n_cores = ?cores.len(),
+		?candidate_hash,
+		"Extracting core index from statement"
+	);
+
+	let statement_validator_index = statement.validator_index();
+	let Some(Some(group_index)) = validator_to_group.get(statement_validator_index) else {
+		gum::debug!(
+			target: LOG_TARGET,
+			?group_rotation_info,
+			?statement,
+			?validator_to_group,
+			n_cores = ?cores.len(),
+			?candidate_hash,
+			"Invalid validator index: {:?}",
+			statement_validator_index
+		);
+		return None
+	};
+
+	// First check if the statement para id matches the core assignment.
+	let core_index = group_rotation_info.core_for_group(*group_index, n_cores);
+
+	if core_index.0 as usize >= n_cores {
+		gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex");
+		return None
+	}
+
+	if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() {
+		let candidate_para_id = candidate.descriptor.para_id;
+		let assigned_para_id = match &cores[core_index.0 as usize] {
+			CoreState::Free => {
+				gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id");
+				return None
+			},
+			CoreState::Occupied(occupied) =>
+				if let Some(next) = &occupied.next_up_on_available {
+					next.para_id
+				} else {
+					return None
+				},
+			CoreState::Scheduled(scheduled) => scheduled.para_id,
+		};
+
+		if assigned_para_id != candidate_para_id {
+			gum::debug!(
+				target: LOG_TARGET,
+				?candidate_hash,
+				?core_index,
+				?assigned_para_id,
+				?candidate_para_id,
+				"Invalid CoreIndex, core is assigned to a different para_id"
+			);
+			return None
+		}
+		return Some(core_index)
+	} else {
+		return Some(core_index)
+	}
+}
+
 /// Load the data necessary to do backing work on top of a relay-parent.
 #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
 async fn construct_per_relay_parent_state<Context>(
 	ctx: &mut Context,
 	relay_parent: Hash,
 	keystore: &KeystorePtr,
+	validator_to_group_cache: &mut LruMap<
+		SessionIndex,
+		Arc<IndexedVec<ValidatorIndex, Option<GroupIndex>>>,
+	>,
 	mode: ProspectiveParachainsMode,
 ) -> Result<Option<PerRelayParentState>, Error> {
-	macro_rules! try_runtime_api {
-		($x: expr) => {
-			match $x {
-				Ok(x) => x,
-				Err(err) => {
-					// Only bubble up fatal errors.
-					error::log_error(Err(Into::<runtime::Error>::into(err).into()))?;
-
-					// We can't do candidate validation work if we don't have the
-					// requisite runtime API data. But these errors should not take
-					// down the node.
-					return Ok(None)
-				},
-			}
-		};
-	}
-
 	let parent = relay_parent;
 
 	let (session_index, validators, groups, cores) = futures::try_join!(
@@ -1020,6 +1124,16 @@ async fn construct_per_relay_parent_state(
 	.map_err(Error::JoinMultiple)?;
 
 	let session_index = try_runtime_api!(session_index);
+
+	let inject_core_index = request_node_features(parent, session_index, ctx.sender())
+		.await?
+ .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + gum::debug!(target: LOG_TARGET, inject_core_index, ?parent, "New state"); + let validators: Vec<_> = try_runtime_api!(validators); let (validator_groups, group_rotation_info) = try_runtime_api!(groups); let cores = try_runtime_api!(cores); @@ -1055,18 +1169,24 @@ async fn construct_per_relay_parent_state( }, }; - let mut groups = HashMap::new(); let n_cores = cores.len(); - let mut assignment = None; - for (idx, core) in cores.into_iter().enumerate() { + let mut groups = HashMap::>::new(); + let mut assigned_core = None; + let mut assigned_para = None; + + for (idx, core) in cores.iter().enumerate() { let core_para_id = match core { CoreState::Scheduled(scheduled) => scheduled.para_id, CoreState::Occupied(occupied) => if mode.is_enabled() { // Async backing makes it legal to build on top of // occupied core. - occupied.candidate_descriptor.para_id + if let Some(next) = &occupied.next_up_on_available { + next.para_id + } else { + continue + } } else { continue }, @@ -1077,11 +1197,27 @@ async fn construct_per_relay_parent_state( let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some(core_para_id); + assigned_para = Some(core_para_id); + assigned_core = Some(core_index); } - groups.insert(core_para_id, g.clone()); + groups.insert(core_index, g.clone()); } } + gum::debug!(target: LOG_TARGET, ?groups, "TableContext"); + + let validator_to_group = validator_to_group_cache + .get_or_insert(session_index, || { + let mut vector = vec![None; validators.len()]; + + for (group_idx, validator_group) in validator_groups.iter().enumerate() { + for validator in validator_group { + vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32)); + } + } + + Arc::new(IndexedVec::<_, _>::from(vector)) + }) + .expect("Just inserted"); let table_context = TableContext { validator, groups, validators, disabled_validators }; let table_config = TableConfig { @@ -1094,7 +1230,8 @@ async fn construct_per_relay_parent_state( Ok(Some(PerRelayParentState { prospective_parachains_mode: mode, parent, - assignment, + assigned_core, + assigned_para, backed: HashSet::new(), table: Table::new(table_config), table_context, @@ -1102,6 +1239,10 @@ async fn construct_per_relay_parent_state( awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), minimum_backing_votes, + inject_core_index, + cores, + validator_to_group: validator_to_group.clone(), + group_rotation_info, })) } @@ -1519,15 +1660,16 @@ async fn import_statement( per_candidate: &mut HashMap, statement: &SignedFullStatementWithPVD, ) -> Result, Error> { + let candidate_hash = statement.payload().candidate_hash(); + gum::debug!( target: LOG_TARGET, statement = ?statement.payload().to_compact(), validator_index = statement.validator_index().0, + ?candidate_hash, "Importing statement", ); - let candidate_hash = statement.payload().candidate_hash(); - // If this is a new candidate (statement is 'seconded' and candidate is unknown), // we need to create an entry in the `PerCandidateState` map. 
	//
@@ -1593,7 +1735,15 @@ async fn import_statement(
 
 	let stmt = primitive_statement_to_table(statement);
 
-	Ok(rp_state.table.import_statement(&rp_state.table_context, stmt))
+	let core = core_index_from_statement(
+		&rp_state.validator_to_group,
+		&rp_state.group_rotation_info,
+		&rp_state.cores,
+		statement,
+	)
+	.ok_or(Error::CoreIndexUnavailable)?;
+
+	Ok(rp_state.table.import_statement(&rp_state.table_context, core, stmt))
 }
 
 /// Handles a summary received from [`import_statement`] and dispatches `Backed` notifications and
@@ -1615,8 +1765,12 @@ async fn post_import_statement_actions(
 	// `HashSet::insert` returns true if the thing wasn't in there already.
 	if rp_state.backed.insert(candidate_hash) {
-		if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) {
-			let para_id = backed.candidate.descriptor.para_id;
+		if let Some(backed) = table_attested_to_backed(
+			attested,
+			&rp_state.table_context,
+			rp_state.inject_core_index,
+		) {
+			let para_id = backed.candidate().descriptor.para_id;
 			gum::debug!(
 				target: LOG_TARGET,
 				candidate_hash = ?candidate_hash,
@@ -1637,7 +1791,7 @@ async fn post_import_statement_actions(
 				// notify collator protocol.
 				ctx.send_message(CollatorProtocolMessage::Backed {
 					para_id,
-					para_head: backed.candidate.descriptor.para_head,
+					para_head: backed.candidate().descriptor.para_head,
 				})
 				.await;
 				// Notify statement distribution of backed candidate.
@@ -1654,8 +1808,14 @@ async fn post_import_statement_actions(
 				);
 				ctx.send_unbounded_message(message);
 			}
+		} else {
+			gum::debug!(target: LOG_TARGET, ?candidate_hash, "Cannot get BackedCandidate");
 		}
+	} else {
+		gum::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate already known");
 	}
+	} else {
+		gum::debug!(target: LOG_TARGET, "No attested candidate");
 	}
 
 	issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table);
@@ -1722,18 +1882,20 @@ async fn background_validate_and_make_available(
 	if rp_state.awaiting_validation.insert(candidate_hash) {
 		// spawn background task.
 		let bg = async move {
-			if let Err(e) = validate_and_make_available(params).await {
-				if let Error::BackgroundValidationMpsc(error) = e {
+			if let Err(error) = validate_and_make_available(params).await {
+				if let Error::BackgroundValidationMpsc(error) = error {
 					gum::debug!(
 						target: LOG_TARGET,
+						?candidate_hash,
 						?error,
 						"Background validation mpsc died during validation - leaf no longer active?"
 					);
 				} else {
 					gum::error!(
 						target: LOG_TARGET,
-						"Failed to validate and make available: {:?}",
-						e
+						?candidate_hash,
+						?error,
+						"Failed to validate and make available",
 					);
 				}
 			}
@@ -1859,9 +2021,10 @@ async fn maybe_validate_and_import(
 
 	let candidate_hash = summary.candidate;
 
-	if Some(summary.group_id) != rp_state.assignment {
+	if Some(summary.group_id) != rp_state.assigned_core {
 		return Ok(())
 	}
+
 	let attesting = match statement.payload() {
 		StatementWithPVD::Seconded(receipt, _) => {
 			let attesting = AttestingData {
@@ -2004,10 +2167,11 @@ async fn handle_second_message(
 	}
 
 	// Sanity check that candidate is from our assignment.
- if Some(candidate.descriptor().para_id) != rp_state.assignment { + if Some(candidate.descriptor().para_id) != rp_state.assigned_para { gum::debug!( target: LOG_TARGET, - our_assignment = ?rp_state.assignment, + our_assignment_core = ?rp_state.assigned_core, + our_assignment_para = ?rp_state.assigned_para, collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); @@ -2015,6 +2179,14 @@ async fn handle_second_message( return Ok(()) } + gum::debug!( + target: LOG_TARGET, + our_assignment_core = ?rp_state.assigned_core, + our_assignment_para = ?rp_state.assigned_para, + collation = ?candidate.descriptor().para_id, + "Current assignments vs collation", + ); + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a // Seconded statement only if we have not signed a Valid statement for the requested candidate. // @@ -2087,7 +2259,13 @@ fn handle_get_backed_candidates_message( &rp_state.table_context, rp_state.minimum_backing_votes, ) - .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context)) + .and_then(|attested| { + table_attested_to_backed( + attested, + &rp_state.table_context, + rp_state.inject_core_index, + ) + }) }) .collect(); diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 1957f4e19c54bd9845e5bb5bd03383985d9c9636..e3cc5727435a3fef3532a9de342cd194ecd4fe3a 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,9 +33,10 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecKind, - ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, + vstaging::node_features, CandidateDescriptor, GroupRotationInfo, HeadData, + PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; +use rstest::rstest; use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; @@ -65,19 +66,21 @@ fn dummy_pvd() -> PersistedValidationData { } } -struct TestState { +pub(crate) struct TestState { chain_ids: Vec, keystore: KeystorePtr, validators: Vec, validator_public: Vec, validation_data: PersistedValidationData, validator_groups: (Vec>, GroupRotationInfo), + validator_to_group: IndexedVec>, availability_cores: Vec, head_data: HashMap, signing_context: SigningContext, relay_parent: Hash, minimum_backing_votes: u32, disabled_validators: Vec, + node_features: NodeFeatures, } impl TestState { @@ -114,6 +117,11 @@ impl Default for TestState { .into_iter() .map(|g| g.into_iter().map(ValidatorIndex).collect()) .collect(); + let validator_to_group: IndexedVec<_, _> = + vec![Some(0), Some(1), Some(0), Some(0), None, Some(0)] + .into_iter() + .map(|x| x.map(|x| GroupIndex(x))) + .collect(); let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; @@ -143,6 +151,7 @@ impl Default for TestState { validators, validator_public, validator_groups: (validator_groups, group_rotation_info), + validator_to_group, availability_cores, head_data, validation_data, @@ -150,6 +159,7 @@ impl Default for TestState { relay_parent, minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, disabled_validators: Vec::new(), + node_features: Default::default(), } } } @@ -285,6 +295,16 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS 
} ); + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) => { + tx.send(Ok(test_state.node_features.clone())).unwrap(); + } + ); + // Check if subsystem job issues a request for the minimum backing votes. assert_matches!( virtual_overseer.recv().await, @@ -477,9 +497,20 @@ fn backing_second_works() { } // Test that the candidate reaches quorum successfully. -#[test] -fn backing_works() { - let test_state = TestState::default(); +#[rstest] +#[case(true)] +#[case(false)] +fn backing_works(#[case] elastic_scaling_mvp: bool) { + let mut test_state = TestState::default(); + if elastic_scaling_mvp { + test_state + .node_features + .resize((node_features::FeatureIndex::ElasticScalingMVP as u8 + 1) as usize, false); + test_state + .node_features + .set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); + } + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; @@ -630,6 +661,31 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + vec![(candidate_a_hash, test_state.relay_parent)], + tx, + ); + + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + + let candidates = rx.await.unwrap(); + assert_eq!(1, candidates.len()); + assert_eq!(candidates[0].validity_votes().len(), 3); + + let (validator_indices, maybe_core_index) = + candidates[0].validator_indices_and_core_index(elastic_scaling_mvp); + if elastic_scaling_mvp { + assert_eq!(maybe_core_index.unwrap(), CoreIndex(0)); + } else { + assert!(maybe_core_index.is_none()); + } + + assert_eq!( + validator_indices, + bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 1, 0, 1].as_bitslice() + ); + virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -639,6 +695,107 @@ fn backing_works() { }); } +#[test] +fn extract_core_index_from_statement_works() { + let test_state = TestState::default(); + + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); + + let pov_hash = pov_a.hash(); + + let mut candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), + ..Default::default() + } + .build(); + + let public2 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement_1 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let public1 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[1].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement_2 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + 
StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(1), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + candidate.descriptor.para_id = test_state.chain_ids[1]; + + let signed_statement_3 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate, pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(1), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let core_index_1 = core_index_from_statement( + &test_state.validator_to_group, + &test_state.validator_groups.1, + &test_state.availability_cores, + &signed_statement_1, + ) + .unwrap(); + + assert_eq!(core_index_1, CoreIndex(0)); + + let core_index_2 = core_index_from_statement( + &test_state.validator_to_group, + &test_state.validator_groups.1, + &test_state.availability_cores, + &signed_statement_2, + ); + + // Must be none, para_id in descriptor is different than para assigned to core + assert_eq!(core_index_2, None); + + let core_index_3 = core_index_from_statement( + &test_state.validator_to_group, + &test_state.validator_groups.1, + &test_state.availability_cores, + &signed_statement_3, + ) + .unwrap(); + + assert_eq!(core_index_3, CoreIndex(1)); +} + #[test] fn backing_works_while_validation_ongoing() { let test_state = TestState::default(); @@ -801,20 +958,20 @@ fn backing_works_while_validation_ongoing() { let candidates = rx.await.unwrap(); assert_eq!(1, candidates.len()); - assert_eq!(candidates[0].validity_votes.len(), 3); + assert_eq!(candidates[0].validity_votes().len(), 3); assert!(candidates[0] - .validity_votes + .validity_votes() .contains(&ValidityAttestation::Implicit(signed_a.signature().clone()))); assert!(candidates[0] - .validity_votes + .validity_votes() .contains(&ValidityAttestation::Explicit(signed_b.signature().clone()))); assert!(candidates[0] - .validity_votes + .validity_votes() .contains(&ValidityAttestation::Explicit(signed_c.signature().clone()))); assert_eq!( - candidates[0].validator_indices, - bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1], + candidates[0].validator_indices_and_core_index(false), + (bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1].as_bitslice(), None) ); virtual_overseer @@ -1422,7 +1579,7 @@ fn backing_works_after_failed_validation() { fn candidate_backing_reorders_votes() { use sp_core::Encode; - let para_id = ParaId::from(10); + let core_idx = CoreIndex(10); let validators = vec![ Sr25519Keyring::Alice, Sr25519Keyring::Bob, @@ -1436,7 +1593,7 @@ fn candidate_backing_reorders_votes() { let validator_groups = { let mut validator_groups = HashMap::new(); validator_groups - .insert(para_id, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect()); + .insert(core_idx, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect()); validator_groups }; @@ -1466,10 +1623,10 @@ fn candidate_backing_reorders_votes() { (ValidatorIndex(3), fake_attestation(3)), (ValidatorIndex(1), fake_attestation(1)), ], - group_id: para_id, + group_id: core_idx, }; - let backed = table_attested_to_backed(attested, &table_context).unwrap(); + let backed = table_attested_to_backed(attested, &table_context, false).unwrap(); let expected_bitvec = { let mut validator_indices = BitVec::::with_capacity(6); @@ -1486,8 +1643,11 @@ fn candidate_backing_reorders_votes() { let expected_attestations = vec![fake_attestation(1).into(), fake_attestation(3).into(), fake_attestation(5).into()]; - assert_eq!(backed.validator_indices, 
expected_bitvec); - assert_eq!(backed.validity_votes, expected_attestations); + assert_eq!( + backed.validator_indices_and_core_index(false), + (expected_bitvec.as_bitslice(), None) + ); + assert_eq!(backed.validity_votes(), expected_attestations); } // Test whether we retry on failed PoV fetching. diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 578f21bef66515e49042d7a11692de67b9642d41..94310d2aa164650db84b78ddf361a9f465ac207d 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -185,6 +185,16 @@ async fn activate_leaf( } ); + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) if parent == hash => { + tx.send(Ok(Default::default())).unwrap(); + } + ); + // Check if subsystem job issues a request for the minimum backing votes. assert_matches!( virtual_overseer.recv().await, @@ -305,10 +315,11 @@ async fn assert_hypothetical_frontier_requests( ) => { let idx = match expected_requests.iter().position(|r| r.0 == request) { Some(idx) => idx, - None => panic!( + None => + panic!( "unexpected hypothetical frontier request, no match found for {:?}", request - ), + ), }; let resp = std::mem::take(&mut expected_requests[idx].1); tx.send(resp).unwrap(); @@ -1268,6 +1279,7 @@ fn concurrent_dependent_candidates() { let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; + // At this point the subsystem waits for response, the previous message is received, // send a second one without blocking. let _ = virtual_overseer @@ -1388,7 +1400,19 @@ fn concurrent_dependent_candidates() { assert_eq!(sess_idx, 1); tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidatorGroups(tx), + )) => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + }, _ => panic!("unexpected message received from overseer: {:?}", msg), } } @@ -1419,7 +1443,6 @@ fn seconding_sanity_check_occupy_same_depth() { let leaf_parent = get_parent_hash(leaf_hash); let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; @@ -1555,13 +1578,14 @@ fn occupied_core_assignment() { const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; + let previous_para_id = test_state.chain_ids[1]; // Set the core state to occupied. 
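+	// (Illustrative) The occupied core belongs to `previous_para_id`, while `para_id` is
+	// `next_up_on_available`; under async backing the assignment logic is expected to pick
+	// `para_id` from `next_up_on_available` rather than the currently occupying para.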
let mut candidate_descriptor = ::test_helpers::dummy_candidate_descriptor(Hash::zero()); - candidate_descriptor.para_id = para_id; + candidate_descriptor.para_id = previous_para_id; test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { group_responsible: Default::default(), - next_up_on_available: None, + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), occupied_since: 100_u32, time_out_at: 200_u32, next_up_on_time_out: None, diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index f42e8f8d22ba3d92e799bcd5aebe1cbeedca83de..6ecfffd7249b9213cc4ffb86f84a38e1cdb9babf 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -17,7 +17,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } wasm-timer = "0.2.5" -thiserror = "1.0.48" +thiserror = { workspace = true } [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index bf6e09fd1b69b702206eb52090cb4a12812c00c0..8237137fdca015ace87248d062431230a2ad47d4 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -695,6 +695,8 @@ async fn validate_candidate_exhaustive( ))), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction(err))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(format!( @@ -780,40 +782,50 @@ trait ValidationBackend { return validation_result } + macro_rules! break_if_no_retries_left { + ($counter:ident) => { + if $counter > 0 { + $counter -= 1; + } else { + break + } + }; + } + // Allow limited retries for each kind of error. let mut num_death_retries_left = 1; let mut num_job_error_retries_left = 1; let mut num_internal_retries_left = 1; + let mut num_runtime_construction_retries_left = 1; loop { // Stop retrying if we exceeded the timeout. 
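+			// (Illustrative) e.g. with a 2s `exec_timeout` and a 1s `retry_delay`, a retry
+			// attempted after 1.2s elapsed could not finish within the deadline, so we give
+			// up early instead of waiting.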
 			if total_time_start.elapsed() + retry_delay > exec_timeout {
 				break
 			}
-
+			let mut retry_immediately = false;
 			match validation_result {
 				Err(ValidationError::PossiblyInvalid(
 					PossiblyInvalidError::AmbiguousWorkerDeath |
 					PossiblyInvalidError::AmbiguousJobDeath(_),
-				)) =>
-					if num_death_retries_left > 0 {
-						num_death_retries_left -= 1;
-					} else {
-						break
-					},
+				)) => break_if_no_retries_left!(num_death_retries_left),
 				Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(_))) =>
-					if num_job_error_retries_left > 0 {
-						num_job_error_retries_left -= 1;
-					} else {
-						break
-					},
+					break_if_no_retries_left!(num_job_error_retries_left),
 				Err(ValidationError::Internal(_)) =>
-					if num_internal_retries_left > 0 {
-						num_internal_retries_left -= 1;
-					} else {
-						break
-					},
+					break_if_no_retries_left!(num_internal_retries_left),
+
+				Err(ValidationError::PossiblyInvalid(
+					PossiblyInvalidError::RuntimeConstruction(_),
+				)) => {
+					break_if_no_retries_left!(num_runtime_construction_retries_left);
+					self.precheck_pvf(pvf.clone()).await?;
+					// In this case the error is deterministic: a retry forces the
+					// ValidationBackend to re-prepare the artifact, so there is no need
+					// to wait before retrying.
+					retry_immediately = true;
+				},
 				Ok(_) | Err(ValidationError::Invalid(_) | ValidationError::Preparation(_)) => break,
 			}
 
@@ -821,8 +833,11 @@
 			// If we got a possibly transient error, retry once after a brief delay, on the
 			// assumption that the conditions that caused this error may have resolved on their own.
 			{
-				// Wait a brief delay before retrying.
-				futures_timer::Delay::new(retry_delay).await;
+				// Transient errors may resolve on their own, so wait a brief delay
+				// before retrying, unless the retry should happen immediately.
+				if !retry_immediately {
+					futures_timer::Delay::new(retry_delay).await;
+				}
 
 				let new_timeout = exec_timeout.saturating_sub(total_time_start.elapsed());
diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml
index 27c2df85235e86d86860496923c3f89ce5fb0199..96fd42785cdd84ee463146f095b06ea01cc7b491 100644
--- a/polkadot/node/core/chain-selection/Cargo.toml
+++ b/polkadot/node/core/chain-selection/Cargo.toml
@@ -18,7 +18,7 @@ polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 kvdb = "0.13.0"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 parity-scale-codec = "3.6.1"
 
 [dev-dependencies]
diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml
index 8a414a35f39cb9e1f3246e1afb83a5ea7a93cff1..1fff0a77170669753fa3fd8d852f936446fe7264 100644
--- a/polkadot/node/core/dispute-coordinator/Cargo.toml
+++ b/polkadot/node/core/dispute-coordinator/Cargo.toml
@@ -14,7 +14,7 @@ futures = "0.3.21"
 gum = { package = "tracing-gum", path = "../../gum" }
 parity-scale-codec = "3.6.1"
 kvdb = "0.13.0"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 schnellru = "0.2.1"
 fatality = "0.0.6"
 
diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs
index 54e0410268f1b30f1e6627d4b2c89cac2b741ccb..5f86da87f21ca060cfe14de536b39e652d12b7b1 100644
--- a/polkadot/node/core/dispute-coordinator/src/initialized.rs
+++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs
@@ -99,7 +99,7 @@ pub(crate) struct Initialized {
 	/// This is the highest `SessionIndex` seen via
`ActiveLeavesUpdate`. It doesn't matter if it /// was cached successfully or not. It is used to detect ancient disputes. highest_session_seen: SessionIndex, - /// Will be set to `true` if an error occured during the last caching attempt + /// Will be set to `true` if an error occurred during the last caching attempt gaps_in_cache: bool, spam_slots: SpamSlots, participation: Participation, diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 3b2fa634102dd0537408a041443b301ffe073c08..24da4dc1e316f8469704ab57e4742dbc07197992 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -13,7 +13,7 @@ workspace = true futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } -thiserror = "1.0.48" +thiserror = { workspace = true } async-trait = "0.1.74" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 5f0b2c0fdc96beb8c90a3e207307d4c9d5a2619b..f66a66e859ec0a139e31068f78225597d577d1a9 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -13,7 +13,7 @@ workspace = true futures = "0.3.19" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "3.6.4" -thiserror = "1.0.48" +thiserror = { workspace = true } fatality = "0.0.6" bitvec = "1" @@ -23,6 +23,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] +rstest = "0.18.2" assert_matches = "1" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-node-subsystem-types = { path = "../../subsystem-types" } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs index 04ee42a9de062bbd183f92c360703eb12c25f1aa..8061dc82d8358ae9e7c741e3f785634601eb5d03 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs @@ -96,6 +96,7 @@ use std::{ use super::LOG_TARGET; use bitvec::prelude::*; +use polkadot_node_subsystem::messages::Ancestors; use polkadot_node_subsystem_util::inclusion_emulator::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; @@ -756,45 +757,46 @@ impl FragmentTree { depths.iter_ones().collect() } - /// Select `count` candidates after the given `required_path` which pass + /// Select `count` candidates after the given `ancestors` which pass /// the predicate and have not already been backed on chain. /// - /// Does an exhaustive search into the tree starting after `required_path`. - /// If there are multiple possibilities of size `count`, this will select the first one. - /// If there is no chain of size `count` that matches the criteria, this will return the largest - /// chain it could find with the criteria. - /// If there are no candidates meeting those criteria, returns an empty `Vec`. - /// Cycles are accepted, see module docs for the `Cycles` section. + /// Does an exhaustive search into the tree after traversing the ancestors path. + /// If the ancestors draw out a path that can be traversed in multiple ways, no + /// candidates will be returned. 
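+	/// (For example, an ancestor set containing two sibling candidates, i.e. a fork, is
+	/// ambiguous, so nothing is returned.)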
+ /// If the ancestors do not draw out a full path (the path contains holes), candidates will be + /// suggested that may fill these holes. + /// If the ancestors don't draw out a valid path, no candidates will be returned. If there are + /// multiple possibilities of the same size, this will select the first one. If there is no + /// chain of size `count` that matches the criteria, this will return the largest chain it could + /// find with the criteria. If there are no candidates meeting those criteria, returns an empty + /// `Vec`. + /// Cycles are accepted, but this code expects that the runtime will deduplicate + /// identical candidates when occupying the cores (when proposing to back A->B->A, only A will + /// be backed on chain). /// - /// The intention of the `required_path` is to allow queries on the basis of + /// The intention of the `ancestors` is to allow queries on the basis of /// one or more candidates which were previously pending availability becoming - /// available and opening up more room on the core. - pub(crate) fn select_children( + /// available or candidates timing out. + pub(crate) fn find_backable_chain( &self, - required_path: &[CandidateHash], + ancestors: Ancestors, count: u32, pred: impl Fn(&CandidateHash) -> bool, ) -> Vec { - let base_node = { - // traverse the required path. - let mut node = NodePointer::Root; - for required_step in required_path { - if let Some(next_node) = self.node_candidate_child(node, &required_step) { - node = next_node; - } else { - return vec![] - }; - } - - node - }; - - // TODO: taking the first best selection might introduce bias - // or become gameable. - // - // For plausibly unique parachains, this shouldn't matter much. - // figure out alternative selection criteria? - self.select_children_inner(base_node, count, count, &pred, &mut vec![]) + if count == 0 { + return vec![] + } + // First, we need to order the ancestors. + // The node returned is the one from which we can start finding new backable candidates. + let Some(base_node) = self.find_ancestor_path(ancestors) else { return vec![] }; + + self.find_backable_chain_inner( + base_node, + count, + count, + &pred, + &mut Vec::with_capacity(count as usize), + ) } // Try finding a candidate chain starting from `base_node` of length `expected_count`. @@ -805,7 +807,7 @@ impl FragmentTree { // Cycles are accepted, but this doesn't allow for infinite execution time, because the maximum // depth we'll reach is `expected_count`. // - // Worst case performance is `O(num_forks ^ expected_count)`. + // Worst case performance is `O(num_forks ^ expected_count)`, the same as populating the tree. // Although an exponential function, this is actually a constant that can only be altered via // sudo/governance, because: // 1. `num_forks` at a given level is at most `max_candidate_depth * max_validators_per_core` @@ -817,7 +819,7 @@ impl FragmentTree { // scaling scenario). For non-elastic-scaling, this is just 1. In practice, this should be a // small number (1-3), capped by the total number of available cores (a constant alterable // only via governance/sudo). 
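+	// (Illustrative) e.g. with `num_forks = 3` and `expected_count = 4`, the search visits at
+	// most 3^4 = 81 paths.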
-	fn select_children_inner(
+	fn find_backable_chain_inner(
 		&self,
 		base_node: NodePointer,
 		expected_count: u32,
@@ -857,7 +859,7 @@
 			for (child_ptr, child_hash) in children {
 				accumulator.push(child_hash);
 
-				let result = self.select_children_inner(
+				let result = self.find_backable_chain_inner(
 					child_ptr,
 					expected_count,
 					remaining_count - 1,
@@ -869,6 +871,9 @@
 				// Short-circuit the search if we've found the right length. Otherwise, we'll
 				// search for a max.
+				// Taking the first best selection doesn't introduce bias or become gameable,
+				// because `find_ancestor_path` uses a `HashSet` to track the ancestors, which
+				// makes the order in which ancestors are visited non-deterministic.
 				if result.len() == expected_count as usize {
 					return result
 				} else if best_result.len() < result.len() {
@@ -879,6 +884,93 @@
 		best_result
 	}
 
+	// Orders the ancestors into a viable path from root to the last one.
+	// Returns a pointer to the last node in the path.
+	// We assume that the ancestors form a chain (that the av-cores do not back parachain
+	// forks); otherwise `None` is returned.
+	// If we cannot use all ancestors, stop at the first found hole in the chain. This usually
+	// translates to a timed out candidate.
+	fn find_ancestor_path(&self, mut ancestors: Ancestors) -> Option<NodePointer> {
+		// The number of elements in the path we've processed so far.
+		let mut depth = 0;
+		let mut last_node = NodePointer::Root;
+		let mut next_node: Option<NodePointer> = Some(NodePointer::Root);
+
+		while let Some(node) = next_node {
+			if depth > self.scope.max_depth {
+				return None;
+			}
+
+			last_node = node;
+
+			next_node = match node {
+				NodePointer::Root => {
+					let children = self
+						.nodes
+						.iter()
+						.enumerate()
+						.take_while(|n| n.1.parent == NodePointer::Root)
+						.map(|(index, node)| (NodePointer::Storage(index), node.candidate_hash))
+						.collect::<Vec<_>>();
+
+					self.find_valid_child(&mut ancestors, children.iter()).ok()?
+				},
+				NodePointer::Storage(ptr) => {
+					let children = self.nodes.get(ptr).map(|n| n.children.iter());
+					if let Some(children) = children {
+						self.find_valid_child(&mut ancestors, children).ok()?
+					} else {
+						None
+					}
+				},
+			};
+
+			depth += 1;
+		}
+
+		Some(last_node)
+	}
+
+	// Find a node from the given iterator which is present in the ancestors
+	// collection. If there are multiple such nodes, return an error and log it. We don't
+	// accept forks in a parachain to be backed. The supplied ancestors should all form a chain.
+	// If there is no such node, return None.
+	fn find_valid_child<'a>(
+		&self,
+		ancestors: &'a mut Ancestors,
+		nodes: impl Iterator<Item = &'a (NodePointer, CandidateHash)> + 'a,
+	) -> Result<Option<NodePointer>, ()> {
+		let mut possible_children =
+			nodes.filter_map(|(node_ptr, hash)| match ancestors.remove(hash) {
+				true => Some(node_ptr),
+				false => None,
+			});
+
+		// We don't accept forks in a parachain to be backed. The supplied ancestors
+		// should all form a chain.
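+		// (Illustrative) e.g. if the remaining ancestors are {A, B} and the current node has
+		// children A and B, both match; that is a fork, so we bail out with `Err(())` below.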
+		let next = possible_children.next();
+		if let Some(second_child) = possible_children.next() {
+			if let (Some(NodePointer::Storage(first_child)), NodePointer::Storage(second_child)) =
+				(next, second_child)
+			{
+				gum::error!(
+					target: LOG_TARGET,
+					para_id = ?self.scope.para,
+					relay_parent = ?self.scope.relay_parent,
+					"Trying to find new backable candidates for a parachain for which we've backed a fork.\
+					This is a bug and the runtime should not have allowed it.\n\
+					Backed candidates with the same parent: {}, {}",
+					self.nodes[*first_child].candidate_hash,
+					self.nodes[*second_child].candidate_hash,
+				);
+			}
+
+			Err(())
+		} else {
+			Ok(next.copied())
+		}
+	}
+
 	fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec<NodePointer>) {
 		// Populate the tree breadth-first.
 		let mut last_sweep_start = None;
@@ -1061,8 +1153,18 @@ mod tests {
 	use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations;
 	use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData};
 	use polkadot_primitives_test_helpers as test_helpers;
+	use rstest::rstest;
 	use std::iter;
 
+	impl NodePointer {
+		fn unwrap_idx(self) -> usize {
+			match self {
+				NodePointer::Root => panic!("Unexpected root"),
+				NodePointer::Storage(index) => index,
+			}
+		}
+	}
+
 	fn make_constraints(
 		min_relay_parent_number: BlockNumber,
 		valid_watermarks: Vec<BlockNumber>,
@@ -1546,6 +1648,373 @@ mod tests {
 		assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
 	}
 
+	#[test]
+	fn test_find_ancestor_path_and_find_backable_chain_empty_tree() {
+		let para_id = ParaId::from(5u32);
+		let relay_parent = Hash::repeat_byte(1);
+		let required_parent: HeadData = vec![0xff].into();
+		let max_depth = 10;
+
+		// Empty tree
+		let storage = CandidateStorage::new();
+		let base_constraints = make_constraints(0, vec![0], required_parent.clone());
+
+		let relay_parent_info =
+			RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() };
+
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_info,
+			base_constraints,
+			vec![],
+			max_depth,
+			vec![],
+		)
+		.unwrap();
+		let tree = FragmentTree::populate(scope, &storage);
+		assert_eq!(tree.candidates().collect::<Vec<_>>().len(), 0);
+		assert_eq!(tree.nodes.len(), 0);
+
+		assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root);
+		assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]);
+		// Invalid candidate.
+		let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect();
+		assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root));
+		assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]);
+	}
+
+	#[rstest]
+	#[case(true, 13)]
+	#[case(false, 8)]
+	// The tree without cycles looks like this:
+	// +-(root)-+
+	// |        |
+	// +----0---+ 7
+	// |    |
+	// 1----+ 5
+	// |    |
+	// |    |
+	// 2    6
+	// |
+	// 3
+	// |
+	// 4
+	//
+	// The tree with cycles is the same as the first but has a cycle from 4 back to the state
+	// produced by 0 (it's bounded by `max_depth + 1`).
+ // +-(root)-+ + // | | + // +----0---+ 7 + // | | + // 1----+ 5 + // | | + // | | + // 2 6 + // | + // 3 + // | + // 4---+ + // | | + // 1 5 + // | + // 2 + // | + // 3 + fn test_find_ancestor_path_and_find_backable_chain( + #[case] has_cycle: bool, + #[case] expected_node_count: usize, + ) { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 7; + let relay_parent_number = 0; + let relay_parent_storage_root = Hash::repeat_byte(69); + + let mut candidates = vec![]; + + // Candidate 0 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![0].into(), + 0, + )); + // Candidate 1 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![1].into(), + 0, + )); + // Candidate 2 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![2].into(), + 0, + )); + // Candidate 3 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![2].into(), + vec![3].into(), + 0, + )); + // Candidate 4 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![4].into(), + 0, + )); + // Candidate 5 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![5].into(), + 0, + )); + // Candidate 6 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![6].into(), + 0, + )); + // Candidate 7 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![7].into(), + 0, + )); + + if has_cycle { + candidates[4] = make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![0].into(), // put the cycle here back to the output state of 0. + 0, + ); + } + + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + let mut storage = CandidateStorage::new(); + + let relay_parent_info = RelayChainBlockInfo { + number: relay_parent_number, + hash: relay_parent, + storage_root: relay_parent_storage_root, + }; + + for (pvd, candidate) in candidates.iter() { + storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); + } + let candidates = + candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!(tree.candidates().collect::>().len(), candidates.len()); + assert_eq!(tree.nodes.len(), expected_node_count); + + // Do some common tests on both trees. + { + // No ancestors supplied. + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // Ancestor which is not part of the tree. Will be ignored. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // A chain fork. 
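+			// (Illustrative) 0 and 7 are siblings (both children of the root), so supplying
+			// both as ancestors is ambiguous and `find_ancestor_path` returns `None`.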
+ let ancestors: Ancestors = + [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + // Ancestors which are part of the tree but don't form a path. Will be ignored. + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors. + let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[7].hash()); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 2, |_| true), + [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors with candidates which have been omitted due to timeouts + let ancestors: Ancestors = + [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[0].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 3, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + if has_cycle { + assert_eq!( + tree.find_backable_chain(ancestors, 2, |_| true), + [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } else { + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } + + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + assert_eq!(res, NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Requested count is 0. + assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + } + + // Now do some tests only on the tree with cycles + if has_cycle { + // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at + // 0-1-2-3-4-1-2-3. 
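+			// (Illustrative) all 5 supplied ancestors match, so the last one (4) becomes the
+			// base node and only three more candidates (1, 2, 3) fit under `max_depth`.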
+ let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[4].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1-2. + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 1, |_| true), + [3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + tree.find_backable_chain(ancestors, 5, |_| true), + [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1 + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 6, |_| true), + [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), + ); + + // For 0-1-2-3-4-5, there's more than 1 way of finding this path in + // the tree. `None` should be returned. The runtime should not have accepted this. + let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + candidates[5].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()); + assert_eq!(res, None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + } + } + #[test] fn graceful_cycle_of_0() { let mut storage = CandidateStorage::new(); @@ -1602,13 +2071,17 @@ mod tests { for count in 1..10 { assert_eq!( - tree.select_children(&[], count, |_| true), + tree.find_backable_chain(Ancestors::new(), count, |_| true), iter::repeat(candidate_a_hash) .take(std::cmp::min(count as usize, max_depth + 1)) .collect::>() ); assert_eq!( - tree.select_children(&[candidate_a_hash], count - 1, |_| true), + tree.find_backable_chain( + [candidate_a_hash].into_iter().collect(), + count - 1, + |_| true + ), iter::repeat(candidate_a_hash) .take(std::cmp::min(count as usize - 1, max_depth)) .collect::>() @@ -1682,22 +2155,22 @@ mod tests { assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); - assert_eq!(tree.select_children(&[], 1, |_| true), vec![candidate_a_hash],); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); assert_eq!( - tree.select_children(&[], 2, |_| true), + tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![candidate_a_hash, candidate_b_hash], ); assert_eq!( - tree.select_children(&[], 3, |_| true), + tree.find_backable_chain(Ancestors::new(), 3, |_| true), vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], ); assert_eq!( - tree.select_children(&[candidate_a_hash], 2, |_| true), + tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), vec![candidate_b_hash, candidate_a_hash], 
); assert_eq!( - tree.select_children(&[], 6, |_| true), + tree.find_backable_chain(Ancestors::new(), 6, |_| true), vec![ candidate_a_hash, candidate_b_hash, @@ -1706,10 +2179,17 @@ mod tests { candidate_a_hash ], ); - assert_eq!( - tree.select_children(&[candidate_a_hash, candidate_b_hash], 6, |_| true), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash,], - ); + + for count in 3..7 { + assert_eq!( + tree.find_backable_chain( + [candidate_a_hash, candidate_b_hash].into_iter().collect(), + count, + |_| true + ), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + ); + } } #[test] diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 6e6915b92728ce745969581f5b700788f5156bc8..2b14e09b4fb4f56f5f7c6ffd738e4167804f44ad 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -35,7 +35,7 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, + Ancestors, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, HypotheticalFrontierRequest, IntroduceCandidateRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, @@ -150,16 +150,9 @@ async fn run_iteration( relay_parent, para, count, - required_path, + ancestors, tx, - ) => answer_get_backable_candidates( - &view, - relay_parent, - para, - count, - required_path, - tx, - ), + ) => answer_get_backable_candidates(&view, relay_parent, para, count, ancestors, tx), ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => answer_hypothetical_frontier_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => @@ -298,7 +291,7 @@ async fn handle_active_leaves_update( ) .expect("ancestors are provided in reverse order and correctly; qed"); - gum::debug!( + gum::trace!( target: LOG_TARGET, relay_parent = ?hash, min_relay_parent = scope.earliest_relay_parent().number, @@ -565,7 +558,7 @@ fn answer_get_backable_candidates( relay_parent: Hash, para: ParaId, count: u32, - required_path: Vec, + ancestors: Ancestors, tx: oneshot::Sender>, ) { let data = match view.active_leaves.get(&relay_parent) { @@ -614,7 +607,7 @@ fn answer_get_backable_candidates( }; let backable_candidates: Vec<_> = tree - .select_children(&required_path, count, |candidate| storage.is_backed(candidate)) + .find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate)) .into_iter() .filter_map(|child_hash| { storage.relay_parent_by_candidate_hash(&child_hash).map_or_else( @@ -635,7 +628,7 @@ fn answer_get_backable_candidates( if backable_candidates.is_empty() { gum::trace!( target: LOG_TARGET, - ?required_path, + ?ancestors, para_id = ?para, %relay_parent, "Could not find any backable candidate", diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 732736b101de0ce525c7753af4b60cca7534522b..0beddbf1416a7ec94cb84a6893e9c20cbeaf28ce 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -407,7 +407,7 @@ async fn get_backable_candidates( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, para_id: ParaId, - required_path: Vec, + ancestors: Ancestors, count: u32, expected_result: Vec<(CandidateHash, Hash)>, ) { @@ -415,11 +415,7 @@ 
async fn get_backable_candidates( virtual_overseer .send(overseer::FromOrchestra::Communication { msg: ProspectiveParachainsMessage::GetBackableCandidates( - leaf.hash, - para_id, - count, - required_path, - tx, + leaf.hash, para_id, count, ancestors, tx, ), }) .await; @@ -903,7 +899,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -912,12 +908,20 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), + 0, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::new(), 0, vec![], ) .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 1.into(), vec![], 0, vec![]).await; // Second candidates. second_candidate(&mut virtual_overseer, candidate_a.clone()).await; @@ -928,7 +932,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -939,12 +943,20 @@ fn check_backable_query_single_candidate() { back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; // Should not get any backable candidates for the other para. - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]).await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 2.into(), - vec![candidate_hash_a], + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -955,7 +967,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) @@ -964,20 +976,20 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_a.hash)], ) .await; - // Should not get anything at the wrong path. + // Wrong path get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b], + vec![candidate_hash_b].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_a, leaf_a.hash)], ) .await; @@ -1075,15 +1087,29 @@ fn check_backable_query_multiple_candidates() { make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); // Should not get any backable candidates for the other para. 
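+		// (Illustrative) para 2 has no candidates in the tree, so an empty ancestor set and
+		// one holding para 1's `candidate_hash_a` both yield nothing.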
- get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]) - .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 5, vec![]) - .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 2.into(), - vec![candidate_hash_a], + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::new(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -1097,7 +1123,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) @@ -1106,7 +1132,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 4, vec![ (candidate_hash_a, leaf_a.hash), @@ -1124,7 +1150,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_a.hash)], ) @@ -1133,16 +1159,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 3, vec![ (candidate_hash_b, leaf_a.hash), @@ -1159,7 +1176,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), count, vec![ (candidate_hash_c, leaf_a.hash), @@ -1172,26 +1189,30 @@ fn check_backable_query_multiple_candidates() { } } - // required path of 2 + // required path of 2 and higher { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b], + vec![candidate_hash_a, candidate_hash_i, candidate_hash_h, candidate_hash_c] + .into_iter() + .collect(), 1, - vec![(candidate_hash_d, leaf_a.hash)], + vec![(candidate_hash_j, leaf_a.hash)], ) .await; + get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), 1, - vec![(candidate_hash_h, leaf_a.hash)], + vec![(candidate_hash_d, leaf_a.hash)], ) .await; + // If the requested count exceeds the largest chain, return the longest // chain we can get. for count in 4..10 { @@ -1199,7 +1220,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], + vec![candidate_hash_a, candidate_hash_c].into_iter().collect(), count, vec![ (candidate_hash_h, leaf_a.hash), @@ -1213,317 +1234,127 @@ fn check_backable_query_multiple_candidates() { // No more candidates in any chain. 
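The count-based assertions in these tests (a count of zero yields nothing, an oversized count yields the longest chain available) can be modelled by a toy chain walk. This is an illustrative model only, not the actual fragment-tree implementation:

```rust
use std::collections::HashMap;

// A toy stand-in for `find_backable_chain`: follow child links from a starting
// point, returning at most `count` hops, or the longest chain available.
fn backable_chain(children: &HashMap<u32, u32>, start: u32, count: usize) -> Vec<u32> {
    let mut chain = Vec::new();
    let mut cur = start;
    while chain.len() < count {
        match children.get(&cur) {
            Some(&next) => {
                chain.push(next);
                cur = next;
            },
            // Fewer than `count` candidates exist: return what we have.
            None => break,
        }
    }
    chain
}

fn main() {
    // 0 -> 1 -> 2, mirroring a three-candidate chain.
    let children: HashMap<u32, u32> = [(0, 1), (1, 2)].into_iter().collect();
    assert_eq!(backable_chain(&children, 0, 2), vec![1, 2]);
    // Requesting more than exists yields the longest chain, as the tests assert.
    assert_eq!(backable_chain(&children, 0, 10), vec![1, 2]);
    // A requested count of zero yields nothing.
    assert!(backable_chain(&children, 0, 0).is_empty());
}
```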
{ - let required_paths = vec![ - vec![candidate_hash_a, candidate_hash_b, candidate_hash_e], - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ], - ]; - for path in required_paths { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - path.clone(), - count, - vec![], - ) - .await; - } + for count in 1..4 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b, candidate_hash_e] + .into_iter() + .collect(), + count, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![ + candidate_hash_a, + candidate_hash_c, + candidate_hash_h, + candidate_hash_i, + candidate_hash_j, + ] + .into_iter() + .collect(), + count, + vec![], + ) + .await; } } - // Should not get anything at the wrong path. + // Wrong paths. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b], + vec![candidate_hash_b].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_a, leaf_a.hash)], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], + vec![candidate_hash_b, candidate_hash_f].into_iter().collect(), 3, - vec![], + vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b, candidate_hash_c], - 3, - vec![], + vec![candidate_hash_a, candidate_hash_h].into_iter().collect(), + 4, + vec![ + (candidate_hash_c, leaf_a.hash), + (candidate_hash_h, leaf_a.hash), + (candidate_hash_i, leaf_a.hash), + (candidate_hash_j, leaf_a.hash), + ], ) .await; - - virtual_overseer - }); - - assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // 10 candidates and 7 parents on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (7, 10)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); - } - - // A tree with multiple roots. - // Parachain 1 looks like this: - // (imaginary root) - // | | - // +----B---+ A - // | | | | - // | | | C - // D E F | - // | H - // G | - // I - // | - // J - { - let test_state = TestState::default(); - let view = test_harness(|mut virtual_overseer| async move { - // Leaf A - let leaf_a = TestLeaf { - number: 100, - hash: Hash::from_low_u64_be(130), - para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), - (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), - ], - }; - - // Activate leaves. 
- activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - - // Candidate B - let (candidate_b, pvd_b) = make_candidate( - leaf_a.hash, - leaf_a.number, + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let candidate_hash_b = candidate_b.hash(); - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + vec![candidate_hash_e, candidate_hash_h].into_iter().collect(), + 2, + vec![(candidate_hash_a, leaf_a.hash), (candidate_hash_b, leaf_a.hash)], + ) + .await; - // Candidate A - let (candidate_a, pvd_a) = make_candidate( - leaf_a.hash, - leaf_a.number, + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![1]), - test_state.validation_code_hash, - ); - let candidate_hash_a = candidate_a.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - - let (candidate_c, candidate_hash_c) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 3); - let (_candidate_d, candidate_hash_d) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 4); - let (_candidate_e, candidate_hash_e) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 5); - let (candidate_f, candidate_hash_f) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 6); - let (_candidate_g, candidate_hash_g) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_f, 7); - let (candidate_h, candidate_hash_h) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 8); - let (candidate_i, candidate_hash_i) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_h, 9); - let (_candidate_j, candidate_hash_j) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); + vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(), + 2, + vec![(candidate_hash_h, leaf_a.hash), (candidate_hash_i, leaf_a.hash)], + ) + .await; - // Should not get any backable candidates for the other para. - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]) - .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 5, vec![]) - .await; + // Parachain fork. get_backable_candidates( &mut virtual_overseer, &leaf_a, - 2.into(), - vec![candidate_hash_a], + 1.into(), + vec![candidate_hash_a, candidate_hash_b, candidate_hash_c].into_iter().collect(), 1, vec![], ) .await; - // Test various scenarios with various counts. 
- - // empty required_path - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 1, - vec![(candidate_hash_b, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 4, - vec![ - (candidate_hash_a, leaf_a.hash), - (candidate_hash_c, leaf_a.hash), - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - ], - ) - .await; - } - - // required path of 1 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], - 1, - vec![(candidate_hash_c, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b], - 1, - vec![(candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], - 2, - vec![(candidate_hash_c, leaf_a.hash), (candidate_hash_h, leaf_a.hash)], - ) - .await; - - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 2..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b], - count, - vec![(candidate_hash_f, leaf_a.hash), (candidate_hash_g, leaf_a.hash)], - ) - .await; - } - } - - // required path of 2 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b, candidate_hash_f], - 1, - vec![(candidate_hash_g, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c], - 1, - vec![(candidate_hash_h, leaf_a.hash)], - ) - .await; - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 4..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c], - count, - vec![ - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], - ) - .await; - } - } - - // No more candidates in any chain. - { - let required_paths = vec![ - vec![candidate_hash_b, candidate_hash_f, candidate_hash_g], - vec![candidate_hash_b, candidate_hash_e], - vec![candidate_hash_b, candidate_hash_d], - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ], - ]; - for path in required_paths { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - path.clone(), - count, - vec![], - ) - .await; - } - } - } + // Non-existent candidate. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))] + .into_iter() + .collect(), + 2, + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], + ) + .await; - // Should not get anything at the wrong path. + // Requested count is zero. 
get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_d], - 1, + Ancestors::new(), + 0, vec![], ) .await; @@ -1531,8 +1362,8 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], - 3, + vec![candidate_hash_a].into_iter().collect(), + 0, vec![], ) .await; @@ -1540,8 +1371,8 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c, candidate_hash_d], - 3, + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + 0, vec![], ) .await; @@ -1853,8 +1684,8 @@ fn check_pvd_query() { assert_eq!(view.candidate_storage.len(), 2); } -// Test simultaneously activating and deactivating leaves, and simultaneously deactivating multiple -// leaves. +// Test simultaneously activating and deactivating leaves, and simultaneously deactivating +// multiple leaves. #[test] fn correctly_updates_leaves() { let test_state = TestState::default(); @@ -2048,7 +1879,7 @@ fn persists_pending_availability_candidate() { &mut virtual_overseer, &leaf_b, para_id, - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_b_hash)], ) @@ -2113,7 +1944,7 @@ fn backwards_compatible() { &mut virtual_overseer, &leaf_a, para_id, - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, candidate_relay_parent)], ) @@ -2135,7 +1966,15 @@ fn backwards_compatible() { ) .await; - get_backable_candidates(&mut virtual_overseer, &leaf_b, para_id, vec![], 1, vec![]).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::new(), + 1, + vec![], + ) + .await; virtual_overseer }); @@ -2162,13 +2001,13 @@ fn uses_ancestry_only_within_session() { .await; assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == hash => { - tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap(); - } - ); + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) + ) if parent == hash => { + tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len + })).unwrap(); } + ); assert_matches!( virtual_overseer.recv().await, diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 175980e90a057f136de25ad7091bcaef497f0a5f..2a09e2b5b2cc83a0596c6f0ae153e66f8f6a1e66 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -13,16 +13,18 @@ workspace = true bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } -thiserror = "1.0.48" +thiserror = { workspace = true } polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } futures-timer = "3.0.2" fatality = "0.0.6" +schnellru = "0.2.1" [dev-dependencies] sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } test-helpers = { 
package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +rstest = "0.18.2" diff --git a/polkadot/node/core/provisioner/src/error.rs b/polkadot/node/core/provisioner/src/error.rs index 376d69f276fc892e92828bf994b29f847fae31d0..aae3234c3cc49d19bdaf20b87fa71cf3f3276bfe 100644 --- a/polkadot/node/core/provisioner/src/error.rs +++ b/polkadot/node/core/provisioner/src/error.rs @@ -44,14 +44,17 @@ pub enum Error { #[error("failed to get block number")] CanceledBlockNumber(#[source] oneshot::Canceled), + #[error("failed to get session index")] + CanceledSessionIndex(#[source] oneshot::Canceled), + #[error("failed to get backed candidates")] CanceledBackedCandidates(#[source] oneshot::Canceled), #[error("failed to get votes on dispute")] CanceledCandidateVotes(#[source] oneshot::Canceled), - #[error("failed to get backable candidate from prospective parachains")] - CanceledBackableCandidate(#[source] oneshot::Canceled), + #[error("failed to get backable candidates from prospective parachains")] + CanceledBackableCandidates(#[source] oneshot::Canceled), #[error(transparent)] ChainApi(#[from] ChainApiError), @@ -71,11 +74,6 @@ pub enum Error { #[error("failed to send return message with Inherents")] InherentDataReturnChannel, - #[error( - "backed candidate does not correspond to selected candidate; check logic in provisioner" - )] - BackedCandidateOrderingProblem, - #[fatal] #[error("Failed to spawn background task")] FailedToSpawnBackgroundTask, diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 51f768d782e0bff3d54efd70523120658b73bba2..c9ed873d3c25ae1a0fbe590a51412839e8105634 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -24,26 +24,29 @@ use futures::{ channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered, FutureExt, }; use futures_timer::Delay; +use schnellru::{ByLength, LruMap}; use polkadot_node_subsystem::{ jaeger, messages::{ - CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, ProvisionableData, - ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, + Ancestors, CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, + ProvisionableData, ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ has_required_runtime, request_availability_cores, request_persisted_validation_data, - runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + request_session_index_for_child, + runtime::{prospective_parachains_mode, request_node_features, ProspectiveParachainsMode}, TimeoutExt, }; use polkadot_primitives::{ - BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, Hash, Id as ParaId, - OccupiedCoreAssumption, SignedAvailabilityBitfield, ValidatorIndex, + vstaging::{node_features::FeatureIndex, NodeFeatures}, + BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, CoreState, Hash, + Id as ParaId, OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, }; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; mod disputes; mod error; @@ -77,11 +80,18 @@ impl ProvisionerSubsystem { } } +/// Per-session info we need for the provisioner subsystem. 
+pub struct PerSession { + prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, +} + /// A per-relay-parent state for the provisioning subsystem. pub struct PerRelayParent { leaf: ActivatedLeaf, backed_candidates: Vec<CandidateReceipt>, prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, signed_bitfields: Vec<SignedAvailabilityBitfield>, is_inherent_ready: bool, awaiting_inherent: Vec<oneshot::Sender<ProvisionerInherentData>>, @@ -89,13 +99,14 @@ pub struct PerRelayParent { } impl PerRelayParent { - fn new(leaf: ActivatedLeaf, prospective_parachains_mode: ProspectiveParachainsMode) -> Self { + fn new(leaf: ActivatedLeaf, per_session: &PerSession) -> Self { let span = PerLeafSpan::new(leaf.span.clone(), "provisioner"); Self { leaf, backed_candidates: Vec::new(), - prospective_parachains_mode, + prospective_parachains_mode: per_session.prospective_parachains_mode, + elastic_scaling_mvp: per_session.elastic_scaling_mvp, signed_bitfields: Vec::new(), is_inherent_ready: false, awaiting_inherent: Vec::new(), @@ -124,10 +135,17 @@ impl ProvisionerSubsystem { async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { let mut inherent_delays = InherentDelays::new(); let mut per_relay_parent = HashMap::new(); + let mut per_session = LruMap::new(ByLength::new(2)); loop { - let result = - run_iteration(&mut ctx, &mut per_relay_parent, &mut inherent_delays, &metrics).await; + let result = run_iteration( + &mut ctx, + &mut per_relay_parent, + &mut per_session, + &mut inherent_delays, + &metrics, + ) + .await; match result { Ok(()) => break, @@ -142,6 +160,7 @@ async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { async fn run_iteration( ctx: &mut Context, per_relay_parent: &mut HashMap<Hash, PerRelayParent>, + per_session: &mut LruMap<SessionIndex, PerSession>, inherent_delays: &mut InherentDelays, metrics: &Metrics, ) -> Result<(), Error> { @@ -151,7 +170,7 @@ async fn run_iteration( // Map the error to ensure that the subsystem exits when the overseer is gone. match from_overseer.map_err(Error::OverseerExited)? { FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => - handle_active_leaves_update(ctx.sender(), update, per_relay_parent, inherent_delays).await?, + handle_active_leaves_update(ctx.sender(), update, per_relay_parent, per_session, inherent_delays).await?, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => { @@ -183,6 +202,7 @@ async fn handle_active_leaves_update( sender: &mut impl overseer::ProvisionerSenderTrait, update: ActiveLeavesUpdate, per_relay_parent: &mut HashMap<Hash, PerRelayParent>, + per_session: &mut LruMap<SessionIndex, PerSession>, inherent_delays: &mut InherentDelays, ) -> Result<(), Error> { gum::trace!(target: LOG_TARGET, "Handle ActiveLeavesUpdate"); } if let Some(leaf) = update.activated { + let session_index = request_session_index_for_child(leaf.hash, sender) + .await + .await + .map_err(Error::CanceledSessionIndex)??; + if per_session.get(&session_index).is_none() { + let prospective_parachains_mode = + prospective_parachains_mode(sender, leaf.hash).await?; + let elastic_scaling_mvp = request_node_features(leaf.hash, session_index, sender) + .await?
+ .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + per_session.insert( + session_index, + PerSession { prospective_parachains_mode, elastic_scaling_mvp }, + ); + } + + let session_info = per_session.get(&session_index).expect("Just inserted"); + gum::trace!(target: LOG_TARGET, leaf_hash=?leaf.hash, "Adding delay"); - let prospective_parachains_mode = prospective_parachains_mode(sender, leaf.hash).await?; let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); - per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, prospective_parachains_mode)); + per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, session_info)); inherent_delays.push(delay_fut); } @@ -253,6 +294,7 @@ async fn send_inherent_data_bg( let signed_bitfields = per_relay_parent.signed_bitfields.clone(); let backed_candidates = per_relay_parent.backed_candidates.clone(); let mode = per_relay_parent.prospective_parachains_mode; + let elastic_scaling_mvp = per_relay_parent.elastic_scaling_mvp; let span = per_relay_parent.span.child("req-inherent-data"); let mut sender = ctx.sender().clone(); @@ -272,6 +314,7 @@ &signed_bitfields, &backed_candidates, mode, + elastic_scaling_mvp, return_senders, &mut sender, &metrics, @@ -383,6 +426,7 @@ async fn send_inherent_data( bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>, from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, @@ -434,6 +478,7 @@ async fn send_inherent_data( &bitfields, candidates, prospective_parachains_mode, + elastic_scaling_mvp, leaf.hash, from_job, ) @@ -558,6 +603,8 @@ async fn select_candidate_hashes_from_tracked( let mut selected_candidates = Vec::with_capacity(candidates.len().min(availability_cores.len())); + let mut selected_parachains = + HashSet::with_capacity(candidates.len().min(availability_cores.len())); gum::debug!( target: LOG_TARGET, @@ -591,6 +638,12 @@ CoreState::Free => continue, }; + if selected_parachains.contains(&scheduled_core.para_id) { + // We already picked a candidate for this parachain. Elastic scaling only works with + // prospective parachains mode. + continue + } + let validation_data = match request_persisted_validation_data( relay_parent, scheduled_core.para_id, @@ -624,6 +677,7 @@ "Selected candidate receipt", ); + selected_parachains.insert(candidate.descriptor.para_id); selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent)); } } @@ -637,63 +691,93 @@ /// Should be called when prospective parachains are enabled. async fn request_backable_candidates( availability_cores: &[CoreState], + elastic_scaling_mvp: bool, bitfields: &[SignedAvailabilityBitfield], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result<Vec<(CandidateHash, Hash)>, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; - let mut selected_candidates = Vec::with_capacity(availability_cores.len()); + // Record how many cores are scheduled for each para id. Use a BTreeMap because + // we'll need to iterate through them. + let mut scheduled_cores: BTreeMap<ParaId, usize> = BTreeMap::new();
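The bookkeeping introduced here boils down to two maps keyed by para id: a count of scheduled cores and a set of on-chain ancestors. A self-contained sketch with simplified stand-in types (the real code uses `ParaId`, `CandidateHash` and `Ancestors`):

```rust
use std::collections::{BTreeMap, HashMap, HashSet};

// Simplified stand-ins for the real polkadot types, for illustration only.
type ParaId = u32;
type CandidateHash = u64;
type Ancestors = HashSet<CandidateHash>;

fn main() {
    // Count scheduled cores per para; a BTreeMap gives a stable iteration order.
    let mut scheduled_cores: BTreeMap<ParaId, usize> = BTreeMap::new();
    // Collect the pending-availability candidates per para as ancestors.
    let mut ancestors: HashMap<ParaId, Ancestors> = HashMap::new();

    // Para 1 has two scheduled cores and one candidate still occupying a core.
    *scheduled_cores.entry(1).or_insert(0) += 1;
    *scheduled_cores.entry(1).or_insert(0) += 1;
    ancestors.entry(1).or_default().insert(0xAA);

    // One request per para: ask for `core_count` candidates on top of the ancestors.
    for (para_id, core_count) in scheduled_cores {
        let para_ancestors = ancestors.remove(&para_id).unwrap_or_default();
        println!(
            "para {para_id}: request {core_count} candidates atop {} ancestors",
            para_ancestors.len()
        );
    }
}
```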
+ // The on-chain ancestors of a para present in availability-cores. + let mut ancestors: HashMap<ParaId, Ancestors> = + HashMap::with_capacity(availability_cores.len()); for (core_idx, core) in availability_cores.iter().enumerate() { - let (para_id, required_path) = match core { + let core_idx = CoreIndex(core_idx as u32); + match core { CoreState::Scheduled(scheduled_core) => { - // The core is free, pick the first eligible candidate from - // the fragment tree. - (scheduled_core.para_id, Vec::new()) + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; }, CoreState::Occupied(occupied_core) => { - if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) - { + let is_available = bitfields_indicate_availability( + core_idx.0 as usize, + bitfields, + &occupied_core.availability, + ); + + if is_available { + ancestors + .entry(occupied_core.para_id()) + .or_default() + .insert(occupied_core.candidate_hash); + if let Some(ref scheduled_core) = occupied_core.next_up_on_available { - // The candidate occupying the core is available, choose its - // child in the fragment tree. - // - // TODO: doesn't work for on-demand parachains. We lean hard on the - // assumption that cores are fixed to specific parachains within a session. - // https://github.com/paritytech/polkadot/issues/5492 - (scheduled_core.para_id, vec![occupied_core.candidate_hash]) - } else { - continue - } - } else { - if occupied_core.time_out_at != block_number { - continue + // Request a new backable candidate for the newly scheduled para id. + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; } + } else if occupied_core.time_out_at <= block_number { + // Timed out before being available. + if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { // Candidate's availability timed out, practically same as scheduled. - (scheduled_core.para_id, Vec::new()) - } else { - continue + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; } + } else { + // Not timed out and not available. + ancestors + .entry(occupied_core.para_id()) + .or_default() + .insert(occupied_core.candidate_hash); } }, CoreState::Free => continue, }; + } - let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; + let mut selected_candidates: Vec<(CandidateHash, Hash)> = + Vec::with_capacity(availability_cores.len()); - match response { - Some((hash, relay_parent)) => selected_candidates.push((hash, relay_parent)), - None => { - gum::debug!( - target: LOG_TARGET, - leaf_hash = ?relay_parent, - core = core_idx, - "No backable candidate returned by prospective parachains", - ); - }, + for (para_id, core_count) in scheduled_cores { + let para_ancestors = ancestors.remove(&para_id).unwrap_or_default();
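The `elastic_scaling_mvp` flag consulted just below comes from the per-session cache set up earlier in this file's diff: a two-entry `schnellru` LRU plus a bit read out of the `NodeFeatures` bitvec. A minimal sketch of both APIs; the feature index used here is an assumption for illustration only:

```rust
use bitvec::vec::BitVec;
use schnellru::{ByLength, LruMap};

fn main() {
    // Two-entry LRU keyed by session index, as the provisioner sets up.
    let mut per_session: LruMap<u32, bool> = LruMap::new(ByLength::new(2));
    per_session.insert(1, false);
    per_session.insert(2, true);
    per_session.insert(3, true); // evicts the least recently used entry (session 1)
    assert!(per_session.get(&1).is_none());

    // NodeFeatures is a bitvec of flags; using index 1 for ElasticScalingMVP is
    // an assumption here, purely for illustration.
    let elastic_scaling_index = 1usize;
    let mut features: BitVec<u8> = BitVec::repeat(false, 2);
    features.set(elastic_scaling_index, true);

    // Mirrors the lookup pattern above: missing bits default to `false`.
    let enabled = features.get(elastic_scaling_index).map(|b| *b).unwrap_or(false);
    assert!(enabled);
}
```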
+ + // If elastic scaling MVP is disabled, only allow one candidate per parachain. + if !elastic_scaling_mvp && core_count > 1 { + continue } + + let response = get_backable_candidates( + relay_parent, + para_id, + para_ancestors, + core_count as u32, + sender, + ) + .await?; + + if response.is_empty() { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?relay_parent, + ?para_id, + "No backable candidate returned by prospective parachains", + ); + continue + } + + selected_candidates.extend(response.into_iter().take(core_count)); } Ok(selected_candidates) @@ -706,6 +790,7 @@ async fn select_candidates( bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result<Vec<BackedCandidate>, Error> { @@ -715,7 +800,14 @@ async fn select_candidates( let selected_candidates = match prospective_parachains_mode { ProspectiveParachainsMode::Enabled { .. } => - request_backable_candidates(availability_cores, bitfields, relay_parent, sender).await?, + request_backable_candidates( + availability_cores, + elastic_scaling_mvp, + bitfields, + relay_parent, + sender, + ) + .await?, ProspectiveParachainsMode::Disabled => select_candidate_hashes_from_tracked( availability_cores, @@ -726,6 +818,7 @@ .await?, }; + gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backable candidates"); // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); @@ -737,28 +830,10 @@ gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, "Got {} backed candidates", candidates.len()); - // `selected_candidates` is generated in ascending order by core index, and - // `GetBackedCandidates` _should_ preserve that property, but let's just make sure. - // - // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected - // candidate maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by - // checking them in order, we can ensure that the backed candidates are also in order. - let mut backed_idx = 0; - for selected in selected_candidates { - if selected.0 == - candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() - { - backed_idx += 1; - } - } - if candidates.len() != backed_idx { - Err(Error::BackedCandidateOrderingProblem)?; - } - // keep only one candidate with validation code. let mut with_validation_code = false; candidates.retain(|c| { - if c.candidate.commitments.new_validation_code.is_some() { + if c.candidate().commitments.new_validation_code.is_some() { if with_validation_code { return false } @@ -796,28 +871,27 @@ async fn get_block_number_under_construction( } } -/// Requests backable candidate from Prospective Parachains based on -/// the given path in the fragment tree. +/// Requests backable candidates from Prospective Parachains based on +/// the given ancestors in the fragment tree. The ancestors need not be ordered. async fn get_backable_candidates( relay_parent: Hash, para_id: ParaId, - required_path: Vec<CandidateHash>, + ancestors: Ancestors, + count: u32, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result<Option<(CandidateHash, Hash)>, Error> { +) -> Result<Vec<(CandidateHash, Hash)>, Error> { let (tx, rx) = oneshot::channel(); sender .send_message(ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para_id, - 1, // core count hardcoded to 1, until elastic scaling is implemented and enabled.
- required_path, + count, + ancestors, tx, )) .await; - rx.await - .map_err(Error::CanceledBackableCandidate) - .map(|res| res.get(0).copied()) + rx.await.map_err(Error::CanceledBackableCandidates) } /// The availability bitfield for a given core is the transpose diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index b26df8ddb910465fc9920d462fd9266935112175..bdb4f85f4009bdfff879cefcb8928899c5b85c81 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -22,6 +22,9 @@ use polkadot_primitives::{OccupiedCore, ScheduledCore}; const MOCK_GROUP_SIZE: usize = 5; pub fn occupied_core(para_id: u32) -> CoreState { + let mut candidate_descriptor = dummy_candidate_descriptor(dummy_hash()); + candidate_descriptor.para_id = para_id.into(); + CoreState::Occupied(OccupiedCore { group_responsible: para_id.into(), next_up_on_available: None, @@ -29,7 +32,7 @@ pub fn occupied_core(para_id: u32) -> CoreState { time_out_at: 200_u32, next_up_on_time_out: None, availability: bitvec![u8, bitvec::order::Lsb0; 0; 32], - candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + candidate_descriptor, candidate_hash: Default::default(), }) } @@ -254,10 +257,56 @@ mod select_candidates { use polkadot_primitives::{ BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; + use rstest::rstest; const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; - // For test purposes, we always return this set of availability cores: + fn dummy_candidate_template() -> CandidateReceipt { + let empty_hash = PersistedValidationData::::default().hash(); + + let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); + descriptor_template.persisted_validation_data_hash = empty_hash; + CandidateReceipt { + descriptor: descriptor_template, + commitments_hash: CandidateCommitments::default().hash(), + } + } + + fn make_candidates( + core_count: usize, + expected_backed_indices: Vec, + ) -> (Vec, Vec) { + let candidate_template = dummy_candidate_template(); + let candidates: Vec<_> = std::iter::repeat(candidate_template) + .take(core_count) + .enumerate() + .map(|(idx, mut candidate)| { + candidate.descriptor.para_id = idx.into(); + candidate + }) + .collect(); + + let expected_backed = expected_backed_indices + .iter() + .map(|&idx| candidates[idx].clone()) + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) + }) + .collect(); + let candidate_hashes = candidates.into_iter().map(|c| c.hash()).collect(); + + (candidate_hashes, expected_backed) + } + + // For testing only one core assigned to a parachain, we return this set of availability cores: // // [ // 0: Free, @@ -273,7 +322,7 @@ mod select_candidates { // 10: Occupied(both next_up set, not available, timeout), // 11: Occupied(next_up_on_available and available, but different successor para_id) // ] - fn mock_availability_cores() -> Vec { + fn mock_availability_cores_one_per_para() -> Vec { use std::ops::Not; use CoreState::{Free, Scheduled}; @@ -292,6 +341,7 @@ mod select_candidates { build_occupied_core(4, |core| { core.next_up_on_available = Some(scheduled_core(4)); core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); }), // 5: Occupied(next_up_on_time_out set but not timeout), build_occupied_core(5, |core| { @@ -307,12 
+357,14 @@ mod select_candidates { build_occupied_core(7, |core| { core.next_up_on_time_out = Some(scheduled_core(7)); core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(71)); }), // 8: Occupied(both next_up set, available), build_occupied_core(8, |core| { core.next_up_on_available = Some(scheduled_core(8)); core.next_up_on_time_out = Some(scheduled_core(8)); core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); }), // 9: Occupied(both next_up set, not available, no timeout), build_occupied_core(9, |core| { @@ -324,6 +376,7 @@ mod select_candidates { core.next_up_on_available = Some(scheduled_core(10)); core.next_up_on_time_out = Some(scheduled_core(10)); core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(101)); }), // 11: Occupied(next_up_on_available and available, but different successor para_id) build_occupied_core(11, |core| { @@ -333,20 +386,189 @@ mod select_candidates { ] } + // For test purposes with multiple possible cores assigned to a para, we always return this set + // of availability cores: + fn mock_availability_cores_multiple_per_para() -> Vec { + use std::ops::Not; + use CoreState::{Free, Scheduled}; + + vec![ + // 0: Free, + Free, + // 1: Scheduled(default), + Scheduled(scheduled_core(1)), + // 2: Occupied(no next_up set), + occupied_core(2), + // 3: Occupied(next_up_on_available set but not available), + build_occupied_core(3, |core| { + core.next_up_on_available = Some(scheduled_core(3)); + }), + // 4: Occupied(next_up_on_available set and available), + build_occupied_core(4, |core| { + core.next_up_on_available = Some(scheduled_core(4)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); + }), + // 5: Occupied(next_up_on_time_out set but not timeout), + build_occupied_core(5, |core| { + core.next_up_on_time_out = Some(scheduled_core(5)); + }), + // 6: Occupied(next_up_on_time_out set and timeout but available), + build_occupied_core(6, |core| { + core.next_up_on_time_out = Some(scheduled_core(6)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.availability = core.availability.clone().not(); + }), + // 7: Occupied(next_up_on_time_out set and timeout and not available), + build_occupied_core(7, |core| { + core.next_up_on_time_out = Some(scheduled_core(7)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(71)); + }), + // 8: Occupied(both next_up set, available), + build_occupied_core(8, |core| { + core.next_up_on_available = Some(scheduled_core(8)); + core.next_up_on_time_out = Some(scheduled_core(8)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); + }), + // 9: Occupied(both next_up set, not available, no timeout), + build_occupied_core(9, |core| { + core.next_up_on_available = Some(scheduled_core(9)); + core.next_up_on_time_out = Some(scheduled_core(9)); + }), + // 10: Occupied(both next_up set, not available, timeout), + build_occupied_core(10, |core| { + core.next_up_on_available = Some(scheduled_core(10)); + core.next_up_on_time_out = Some(scheduled_core(10)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(101)); + }), + // 11: Occupied(next_up_on_available and available, but different successor para_id) + build_occupied_core(11, |core| { + 
core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + }), + // 12-14: Occupied(next_up_on_available and available, same para_id). + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(121)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(122)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(123)); + }), + // 15: Scheduled on same para_id as 12-14. + Scheduled(scheduled_core(12)), + // 16: Occupied(13, no next_up set, not available) + build_occupied_core(13, |core| { + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(131)); + }), + // 17: Occupied(13, no next_up set, available) + build_occupied_core(13, |core| { + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(132)); + }), + // 18: Occupied(13, next_up_on_available set to 13 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(133)); + }), + // 19: Occupied(13, next_up_on_available set to 13 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(134)); + }), + // 20: Occupied(13, next_up_on_time_out set to 13 but not timeout) + build_occupied_core(13, |core| { + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(135)); + }), + // 21: Occupied(13, next_up_on_available set to 14 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(136)); + }), + // 22: Occupied(13, next_up_on_available set to 14 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(137)); + }), + // 23: Occupied(13, both next_up set to 14, available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(138)); + }), + // 24: Occupied(13, both next_up set to 14, not available, timeout) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1399)); + }), + // 25: Occupied(13, next_up_on_available and available, but successor para_id 15) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(139)); + }), + // 26: 
Occupied(15, next_up_on_available and available, but successor para_id 13) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(151)); + }), + // 27: Occupied(15, both next_up, both available and timed out) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(152)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + }), + // 28: Occupied(13, both next_up set to 13, not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1398)); + }), + // 29: Occupied(13, both next_up set to 13, not available, timeout) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.next_up_on_time_out = Some(scheduled_core(13)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1397)); + }), + ] + } + async fn mock_overseer( mut receiver: mpsc::UnboundedReceiver, - expected: Vec, + mock_availability_cores: Vec, + mut expected: Vec, + mut expected_ancestors: HashMap, Ancestors>, prospective_parachains_mode: ProspectiveParachainsMode, ) { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; + let mut backed_iter = expected.clone().into_iter(); + + expected.sort_by_key(|c| c.candidate().descriptor.para_id); let mut candidates_iter = expected .iter() .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)); - let mut backed_iter = expected.clone().into_iter(); - while let Some(from_job) = receiver.next().await { match from_job { AllMessages::ChainApi(BlockNumber(_relay_parent, tx)) => @@ -356,7 +578,7 @@ mod select_candidates { PersistedValidationDataReq(_para_id, _assumption, tx), )) => tx.send(Ok(Some(Default::default()))).unwrap(), AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx))) => - tx.send(Ok(mock_availability_cores())).unwrap(), + tx.send(Ok(mock_availability_cores.clone())).unwrap(), AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( hashes, sender, @@ -373,35 +595,71 @@ mod select_candidates { let _ = sender.send(response); }, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetBackableCandidates(_, _, count, _, tx), - ) => { - assert_eq!(count, 1); - - match prospective_parachains_mode { - ProspectiveParachainsMode::Enabled { .. } => { - let _ = - tx.send(candidates_iter.next().map_or_else(Vec::new, |c| vec![c])); - }, - ProspectiveParachainsMode::Disabled => - panic!("unexpected prospective parachains request"), - } + ProspectiveParachainsMessage::GetBackableCandidates( + _, + _para_id, + count, + actual_ancestors, + tx, + ), + ) => match prospective_parachains_mode { + ProspectiveParachainsMode::Enabled { .. 
} => { + assert!(count > 0); + let candidates = + (&mut candidates_iter).take(count as usize).collect::>(); + assert_eq!(candidates.len(), count as usize); + + if !expected_ancestors.is_empty() { + if let Some(expected_required_ancestors) = expected_ancestors.remove( + &(candidates + .clone() + .into_iter() + .take(actual_ancestors.len()) + .map(|(c_hash, _)| c_hash) + .collect::>()), + ) { + assert_eq!(expected_required_ancestors, actual_ancestors); + } else { + assert_eq!(actual_ancestors.len(), 0); + } + } + + let _ = tx.send(candidates); + }, + ProspectiveParachainsMode::Disabled => + panic!("unexpected prospective parachains request"), }, _ => panic!("Unexpected message: {:?}", from_job), } } + + if let ProspectiveParachainsMode::Enabled { .. } = prospective_parachains_mode { + assert_eq!(candidates_iter.next(), None); + } + assert_eq!(expected_ancestors.len(), 0); } - #[test] - fn can_succeed() { + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn can_succeed(#[case] prospective_parachains_mode: ProspectiveParachainsMode) { test_harness( - |r| mock_overseer(r, Vec::new(), ProspectiveParachainsMode::Disabled), + |r| { + mock_overseer( + r, + Vec::new(), + Vec::new(), + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; select_candidates( &[], &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) @@ -411,22 +669,22 @@ mod select_candidates { ) } - // this tests that only the appropriate candidates get selected. - // To accomplish this, we supply a candidate list containing one candidate per possible core; - // the candidate selection algorithm must filter them to the appropriate set - #[test] - fn selects_correct_candidates() { - let mock_cores = mock_availability_cores(); - - let empty_hash = PersistedValidationData::::default().hash(); - - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; - + // Test candidate selection when prospective parachains mode is disabled. + // This tests that only the appropriate candidates get selected when prospective parachains mode + // is disabled. To accomplish this, we supply a candidate list containing one candidate per + // possible core; the candidate selection algorithm must filter them to the appropriate set + #[rstest] + // why those particular indices? see the comments on mock_availability_cores_*() functions. + #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], true)] + #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], false)] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], true)] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], false)] + fn test_in_subsystem_selection( + #[case] mock_cores: Vec, + #[case] expected_candidates: Vec, + #[case] elastic_scaling_mvp: bool, + ) { + let candidate_template = dummy_candidate_template(); let candidates: Vec<_> = std::iter::repeat(candidate_template) .take(mock_cores.len()) .enumerate() @@ -453,31 +711,43 @@ mod select_candidates { }) .collect(); - // why those particular indices? 
see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + expected_candidates.into_iter().map(|idx| candidates[idx].clone()).collect(); let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; let expected_backed = expected_candidates .iter() - .map(|c| BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor().clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) }) .collect(); + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let result = select_candidates( + let result: Vec = select_candidates( &mock_cores, &[], &candidates, prospective_parachains_mode, + elastic_scaling_mvp, Default::default(), &mut tx, ) @@ -486,7 +756,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -495,20 +765,24 @@ mod select_candidates { ) } - #[test] - fn selects_max_one_code_upgrade() { - let mock_cores = mock_availability_cores(); + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn selects_max_one_code_upgrade( + #[case] prospective_parachains_mode: ProspectiveParachainsMode, + ) { + let mock_cores = mock_availability_cores_one_per_para(); let empty_hash = PersistedValidationData::::default().hash(); // why those particular indices? see the comments on mock_availability_cores() - // the first candidate with code is included out of [1, 4, 7, 8, 10]. - let cores = [1, 4, 7, 8, 10]; + // the first candidate with code is included out of [1, 4, 7, 8, 10, 12]. 
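These tests were migrated to `rstest` case-based parameterization (note the `rstest = "0.18.2"` dev-dependency added earlier in this diff). A minimal sketch of the pattern, assuming `rstest` is available as a dev-dependency; the test body is illustrative only:

```rust
// Sketch of the `rstest` parameterization pattern used throughout these tests.
use rstest::rstest;

#[rstest]
#[case(true)]
#[case(false)]
fn elastic_scaling_flag_is_exercised(#[case] elastic_scaling_mvp: bool) {
    // Each `#[case]` attribute expands into its own generated test, so both
    // branches run as separate test cases under `cargo test`.
    let max_per_para = if elastic_scaling_mvp { usize::MAX } else { 1 };
    assert!(max_per_para >= 1);
}
```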
+ let cores = [1, 4, 7, 8, 10, 12]; let cores_with_code = [1, 4, 8]; - let expected_cores = [1, 7, 10]; + let expected_cores = [1, 7, 10, 12]; - let committed_receipts: Vec<_> = (0..mock_cores.len()) + let committed_receipts: Vec<_> = (0..=mock_cores.len()) .map(|i| { let mut descriptor = dummy_candidate_descriptor(dummy_hash()); descriptor.para_id = i.into(); @@ -532,10 +806,13 @@ mod select_candidates { // Build possible outputs from select_candidates let backed_candidates: Vec<_> = committed_receipts .iter() - .map(|committed_receipt| BackedCandidate { - candidate: committed_receipt.clone(), - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|committed_receipt| { + BackedCandidate::new( + committed_receipt.clone(), + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) }) .collect(); @@ -546,27 +823,36 @@ mod select_candidates { let expected_backed_filtered: Vec<_> = expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &candidates, prospective_parachains_mode, + false, Default::default(), &mut tx, ) .await .unwrap(); - assert_eq!(result.len(), 3); + assert_eq!(result.len(), 4); result.into_iter().for_each(|c| { assert!( - expected_backed_filtered.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -575,63 +861,214 @@ mod select_candidates { ) } - #[test] - fn request_from_prospective_parachains() { - let mock_cores = mock_availability_cores(); - let empty_hash = PersistedValidationData::::default().hash(); + #[rstest] + #[case(true)] + #[case(false)] + fn request_from_prospective_parachains_one_core_per_para(#[case] elastic_scaling_mvp: bool) { + let mock_cores = mock_availability_cores_one_per_para(); - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; + // why those particular indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10, 12]; + let (candidates, expected_candidates) = + make_candidates(mock_cores.len() + 1, expected_candidates); - let candidates: Vec<_> = std::iter::repeat(candidate_template) - .take(mock_cores.len()) - .enumerate() - .map(|(idx, mut candidate)| { - candidate.descriptor.para_id = idx.into(); - candidate - }) - .collect(); + // Expect prospective parachains subsystem requests. 
+        let prospective_parachains_mode =
+            ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
+
+        let mut required_ancestors: HashMap<Vec<CandidateHash>, Ancestors> = HashMap::new();
+        required_ancestors.insert(
+            vec![candidates[4]],
+            vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(),
+        );
+        required_ancestors.insert(
+            vec![candidates[8]],
+            vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(),
+        );
+
+        let mock_cores_clone = mock_cores.clone();
+        let expected_candidates_clone = expected_candidates.clone();
+        test_harness(
+            |r| {
+                mock_overseer(
+                    r,
+                    mock_cores_clone,
+                    expected_candidates_clone,
+                    required_ancestors,
+                    prospective_parachains_mode,
+                )
+            },
+            |mut tx: TestSubsystemSender| async move {
+                let result = select_candidates(
+                    &mock_cores,
+                    &[],
+                    &[],
+                    prospective_parachains_mode,
+                    elastic_scaling_mvp,
+                    Default::default(),
+                    &mut tx,
+                )
+                .await
+                .unwrap();
+
+                assert_eq!(result.len(), expected_candidates.len());
+                result.into_iter().for_each(|c| {
+                    assert!(
+                        expected_candidates
+                            .iter()
+                            .any(|c2| c.candidate().corresponds_to(&c2.receipt())),
+                        "Failed to find candidate: {:?}",
+                        c,
+                    )
+                });
+            },
+        )
+    }
+
+    #[test]
+    fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp() {
+        let mock_cores = mock_availability_cores_multiple_per_para();
 
         // why those particular indices? see the comments on mock_availability_cores()
         let expected_candidates: Vec<_> =
-            [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect();
+            vec![1, 4, 7, 8, 10, 12, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15];
 
         // Expect prospective parachains subsystem requests.
         let prospective_parachains_mode =
             ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
 
-        let expected_backed = expected_candidates
-            .iter()
-            .map(|c| BackedCandidate {
-                candidate: CommittedCandidateReceipt {
-                    descriptor: c.descriptor.clone(),
-                    commitments: Default::default(),
-                },
-                validity_votes: Vec::new(),
-                validator_indices: default_bitvec(MOCK_GROUP_SIZE),
-            })
-            .collect();
+        let (candidates, expected_candidates) =
+            make_candidates(mock_cores.len(), expected_candidates);
+
+        let mut required_ancestors: HashMap<Vec<CandidateHash>, Ancestors> = HashMap::new();
+        required_ancestors.insert(
+            vec![candidates[4]],
+            vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(),
+        );
+        required_ancestors.insert(
+            vec![candidates[8]],
+            vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(),
+        );
+        required_ancestors.insert(
+            [12, 12, 12].iter().map(|&idx| candidates[idx]).collect::<Vec<_>>(),
+            vec![
+                CandidateHash(Hash::from_low_u64_be(121)),
+                CandidateHash(Hash::from_low_u64_be(122)),
+                CandidateHash(Hash::from_low_u64_be(123)),
+            ]
+            .into_iter()
+            .collect(),
+        );
+        required_ancestors.insert(
+            [13, 13, 13].iter().map(|&idx| candidates[idx]).collect::<Vec<_>>(),
+            (131..=139)
+                .map(|num| CandidateHash(Hash::from_low_u64_be(num)))
+                .chain(std::iter::once(CandidateHash(Hash::from_low_u64_be(1398))))
+                .collect(),
+        );
+
+        required_ancestors.insert(
+            [15, 15].iter().map(|&idx| candidates[idx]).collect::<Vec<_>>(),
+            vec![
+                CandidateHash(Hash::from_low_u64_be(151)),
+                CandidateHash(Hash::from_low_u64_be(152)),
+            ]
+            .into_iter()
+            .collect(),
+        );
+
+        let mock_cores_clone = mock_cores.clone();
+        let expected_candidates_clone = expected_candidates.clone();
+        test_harness(
+            |r| {
+                mock_overseer(
+                    r,
+                    mock_cores_clone,
+                    expected_candidates,
+                    required_ancestors,
+                    prospective_parachains_mode,
+                )
+            },
+            |mut tx: TestSubsystemSender| async move {
+                let result = select_candidates(
+                    &mock_cores,
+                    &[],
+                    &[],
+                    prospective_parachains_mode,
+                    true,
+                    Default::default(),
+                    &mut tx,
+                )
+                .await
+                .unwrap();
+                assert_eq!(result.len(), expected_candidates_clone.len());
+                result.into_iter().for_each(|c| {
+                    assert!(
+                        expected_candidates_clone
+                            .iter()
+                            .any(|c2| c.candidate().corresponds_to(&c2.receipt())),
+                        "Failed to find candidate: {:?}",
+                        c,
+                    )
+                });
+            },
+        )
+    }
+
+    #[test]
+    fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp_disabled() {
+        let mock_cores = mock_availability_cores_multiple_per_para();
+
+        // why those particular indices? see the comments on mock_availability_cores()
+        let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10];
+        // Expect prospective parachains subsystem requests.
+        let prospective_parachains_mode =
+            ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
+
+        let (candidates, expected_candidates) =
+            make_candidates(mock_cores.len(), expected_candidates);
+
+        let mut required_ancestors: HashMap<Vec<CandidateHash>, Ancestors> = HashMap::new();
+        required_ancestors.insert(
+            vec![candidates[4]],
+            vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(),
+        );
+        required_ancestors.insert(
+            vec![candidates[8]],
+            vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(),
+        );
+
+        let mock_cores_clone = mock_cores.clone();
+        let expected_candidates_clone = expected_candidates.clone();
         test_harness(
-            |r| mock_overseer(r, expected_backed, prospective_parachains_mode),
+            |r| {
+                mock_overseer(
+                    r,
+                    mock_cores_clone,
+                    expected_candidates,
+                    required_ancestors,
+                    prospective_parachains_mode,
+                )
+            },
             |mut tx: TestSubsystemSender| async move {
                 let result = select_candidates(
                     &mock_cores,
                     &[],
                     &[],
                     prospective_parachains_mode,
+                    false,
                     Default::default(),
                     &mut tx,
                 )
                 .await
                 .unwrap();
+                assert_eq!(result.len(), expected_candidates_clone.len());
                 result.into_iter().for_each(|c| {
                     assert!(
-                        expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)),
+                        expected_candidates_clone
+                            .iter()
+                            .any(|c2| c.candidate().corresponds_to(&c2.receipt())),
                         "Failed to find candidate: {:?}",
                         c,
                     )
@@ -642,18 +1079,11 @@ mod select_candidates {
 
     #[test]
     fn request_receipts_based_on_relay_parent() {
-        let mock_cores = mock_availability_cores();
-        let empty_hash = PersistedValidationData::<Hash, BlockNumber>::default().hash();
-
-        let mut descriptor_template = dummy_candidate_descriptor(dummy_hash());
-        descriptor_template.persisted_validation_data_hash = empty_hash;
-        let candidate_template = CandidateReceipt {
-            descriptor: descriptor_template,
-            commitments_hash: CandidateCommitments::default().hash(),
-        };
+        let mock_cores = mock_availability_cores_one_per_para();
+        let candidate_template = dummy_candidate_template();
 
         let candidates: Vec<_> = std::iter::repeat(candidate_template)
-            .take(mock_cores.len())
+            .take(mock_cores.len() + 1)
             .enumerate()
             .map(|(idx, mut candidate)| {
                 candidate.descriptor.para_id = idx.into();
@@ -664,31 +1094,44 @@ mod select_candidates {
 
         // why those particular indices? see the comments on mock_availability_cores()
         let expected_candidates: Vec<_> =
-            [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect();
+            [1, 4, 7, 8, 10, 12].iter().map(|&idx| candidates[idx].clone()).collect();
 
         // Expect prospective parachains subsystem requests.
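         // (The exact `max_candidate_depth`/`allowed_ancestry_len` values are not what is
         // being exercised here; `ProspectiveParachainsMode::Enabled` is what routes
         // `select_candidates` through the prospective-parachains request path.)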
         let prospective_parachains_mode =
             ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
 
         let expected_backed = expected_candidates
             .iter()
-            .map(|c| BackedCandidate {
-                candidate: CommittedCandidateReceipt {
-                    descriptor: c.descriptor.clone(),
-                    commitments: Default::default(),
-                },
-                validity_votes: Vec::new(),
-                validator_indices: default_bitvec(MOCK_GROUP_SIZE),
+            .map(|c| {
+                BackedCandidate::new(
+                    CommittedCandidateReceipt {
+                        descriptor: c.descriptor().clone(),
+                        commitments: Default::default(),
+                    },
+                    Vec::new(),
+                    default_bitvec(MOCK_GROUP_SIZE),
+                    None,
+                )
             })
             .collect();
 
+        let mock_cores_clone = mock_cores.clone();
         test_harness(
-            |r| mock_overseer(r, expected_backed, prospective_parachains_mode),
+            |r| {
+                mock_overseer(
+                    r,
+                    mock_cores_clone,
+                    expected_backed,
+                    HashMap::new(),
+                    prospective_parachains_mode,
+                )
+            },
             |mut tx: TestSubsystemSender| async move {
                 let result = select_candidates(
                     &mock_cores,
                     &[],
                     &[],
                     prospective_parachains_mode,
+                    false,
                     Default::default(),
                     &mut tx,
                 )
                 .await
@@ -697,7 +1140,7 @@ mod select_candidates {
 
             result.into_iter().for_each(|c| {
                 assert!(
-                    expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)),
+                    expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)),
                     "Failed to find candidate: {:?}",
                     c,
                 )
diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml
index 3f02c2fa74a8e09248a586187f342ff5db2e6146..f4f954e316c0b758a4cb1e94f80b729257349632 100644
--- a/polkadot/node/core/pvf-checker/Cargo.toml
+++ b/polkadot/node/core/pvf-checker/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
 [dependencies]
 futures = "0.3.21"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 gum = { package = "tracing-gum", path = "../../gum" }
 
 polkadot-node-primitives = { path = "../../primitives" }
diff --git a/polkadot/node/core/pvf-checker/src/lib.rs b/polkadot/node/core/pvf-checker/src/lib.rs
index ae0fae6b4f9f7e2a4924effd4c3395d60b456f2f..c00ec0d952f158f8df21de87ee9d7460d68218bc 100644
--- a/polkadot/node/core/pvf-checker/src/lib.rs
+++ b/polkadot/node/core/pvf-checker/src/lib.rs
@@ -415,7 +415,7 @@ async fn check_signing_credentials(
             gum::warn!(
                 target: LOG_TARGET,
                 relay_parent = ?leaf,
-                "error occured during requesting validators: {:?}",
+                "error occurred during requesting validators: {:?}",
                 e
             );
             return None
@@ -508,7 +508,7 @@ async fn sign_and_submit_pvf_check_statement(
                 target: LOG_TARGET,
                 ?relay_parent,
                 ?validation_code_hash,
-                "error occured during submitting a vote: {:?}",
+                "error occurred during submitting a vote: {:?}",
                 e,
             );
         },
diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml
index d7ea573cfd2f3e772a3af04c0b840ce94ecdf93a..9ed64b88ffde857c6abe171a7d1cad7dd66dd509 100644
--- a/polkadot/node/core/pvf/Cargo.toml
+++ b/polkadot/node/core/pvf/Cargo.toml
@@ -23,7 +23,7 @@ pin-project = "1.0.9"
 rand = "0.8.5"
 slotmap = "1.0"
 tempfile = "3.3.0"
-thiserror = "1.0.31"
+thiserror = { workspace = true }
 tokio = { version = "1.24.2", features = ["fs", "process"] }
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml
index 631d4335b522d661cb4335a966f09300df3ea047..56bad9792fa0b6accea6460025c64c18e8e1e5d8 100644
--- a/polkadot/node/core/pvf/common/Cargo.toml
+++ b/polkadot/node/core/pvf/common/Cargo.toml
@@ -15,7 +15,7 @@ cpu-time = "1.0.0"
 futures = "0.3.21"
 gum = { package = "tracing-gum", path = "../../../gum" }
 libc = "0.2.152"
-thiserror = "1.0.31"
+thiserror = { workspace = true }
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs
index f8faefc24e65aadb2c50d2375e30fc7d579d6d37..cf274044456f3ea2db6770fbd61b3319f6420996 100644
--- a/polkadot/node/core/pvf/common/src/error.rs
+++ b/polkadot/node/core/pvf/common/src/error.rs
@@ -16,6 +16,7 @@
 
 use crate::prepare::{PrepareSuccess, PrepareWorkerSuccess};
 use parity_scale_codec::{Decode, Encode};
+pub use sc_executor_common::error::Error as ExecuteError;
 
 /// Result of PVF preparation from a worker, with checksum of the compiled PVF and stats of the
 /// preparation if successful.
diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs
index 6b3becf524d71fa2d4f4bf574e0a86d09a7a60d9..18c97b03cbcd6fccc03cb27cb9f9e3aba9c7cf2a 100644
--- a/polkadot/node/core/pvf/common/src/execute.rs
+++ b/polkadot/node/core/pvf/common/src/execute.rs
@@ -40,6 +40,9 @@ pub enum WorkerResponse {
     },
     /// The candidate is invalid.
     InvalidCandidate(String),
+    /// Instantiation of the WASM module instance failed during an execution.
+    /// Possibly caused by a local issue or an unclean node update. May be retried
+    /// with re-preparation.
+    RuntimeConstruction(String),
     /// The job timed out.
     JobTimedOut,
     /// The job process has died. We must kill the worker just in case.
@@ -68,6 +71,9 @@ pub enum JobResponse {
         /// The result of parachain validation.
         result_descriptor: ValidationResult,
     },
+    /// A possibly transient runtime instantiation error happened during the
+    /// execution; may be retried with re-preparation.
+    RuntimeConstruction(String),
     /// The candidate is invalid.
     InvalidCandidate(String),
 }
@@ -81,6 +87,15 @@ impl JobResponse {
             Self::InvalidCandidate(format!("{}: {}", ctx, msg))
         }
     }
+
+    /// Creates a may-retry (runtime construction) response from a context `ctx` and a
+    /// message `msg` (which can be empty).
+    pub fn runtime_construction(ctx: &'static str, msg: &str) -> Self {
+        if msg.is_empty() {
+            Self::RuntimeConstruction(ctx.to_string())
+        } else {
+            Self::RuntimeConstruction(format!("{}: {}", ctx, msg))
+        }
+    }
 }
 
 /// An unexpected error occurred in the execution job process. Because this comes from the job,
diff --git a/polkadot/node/core/pvf/common/src/executor_interface.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs
index 4cd2f5c85eec53661fde7625bb50a1d195381beb..252e611db8a488cff776b5d7863c987318e7e013 100644
--- a/polkadot/node/core/pvf/common/src/executor_interface.rs
+++ b/polkadot/node/core/pvf/common/src/executor_interface.rs
@@ -16,6 +16,7 @@
 
 //! Interface to the Substrate Executor
 
+use crate::error::ExecuteError;
 use polkadot_primitives::{
     executor_params::{DEFAULT_LOGICAL_STACK_MAX, DEFAULT_NATIVE_STACK_MAX},
     ExecutorParam, ExecutorParams,
@@ -23,7 +24,7 @@ use polkadot_primitives::{
 use sc_executor_common::{
     error::WasmError,
     runtime_blob::RuntimeBlob,
-    wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmModule as _},
+    wasm_runtime::{HeapAllocStrategy, WasmModule as _},
 };
 use sc_executor_wasmtime::{Config, DeterministicStackLimit, Semantics, WasmtimeRuntime};
 use sp_core::storage::{ChildInfo, TrackedStorageKey};
@@ -109,7 +110,7 @@ pub unsafe fn execute_artifact(
     compiled_artifact_blob: &[u8],
     executor_params: &ExecutorParams,
     params: &[u8],
-) -> Result<Vec<u8>, String> {
+) -> Result<Vec<u8>, ExecuteError> {
     let mut extensions = sp_externalities::Extensions::new();
 
     extensions.register(sp_core::traits::ReadRuntimeVersionExt::new(ReadRuntimeVersion));
@@ -118,12 +119,11 @@ pub unsafe fn execute_artifact(
     match sc_executor::with_externalities_safe(&mut ext, || {
         let runtime = create_runtime_from_artifact_bytes(compiled_artifact_blob, executor_params)?;
-        runtime.new_instance()?.call(InvokeMethod::Export("validate_block"), params)
+        runtime.new_instance()?.call("validate_block", params)
     }) {
         Ok(Ok(ok)) => Ok(ok),
         Ok(Err(err)) | Err(err) => Err(err),
     }
-    .map_err(|err| format!("execute error: {:?}", err))
 }
 
 /// Constructs the runtime for the given PVF, given the artifact bytes.
diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs
index 0cfa5a786946c7428ea8954515fe03807a90da32..bd7e76010a6dfedb339fd2fc1c17aa7f4007babe 100644
--- a/polkadot/node/core/pvf/execute-worker/src/lib.rs
+++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs
@@ -16,7 +16,9 @@
 
 //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary.
 
-pub use polkadot_node_core_pvf_common::executor_interface::execute_artifact;
+pub use polkadot_node_core_pvf_common::{
+    error::ExecuteError, executor_interface::execute_artifact,
+};
 
 // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are
 // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-execute-worker=trace`.
@@ -237,7 +239,9 @@ fn validate_using_artifact(
         // [`executor_interface::prepare`].
         execute_artifact(compiled_artifact_blob, executor_params, params)
     } {
-        Err(err) => return JobResponse::format_invalid("execute", &err),
+        Err(ExecuteError::RuntimeConstruction(wasmerr)) =>
+            return JobResponse::runtime_construction("execute", &wasmerr.to_string()),
+        Err(err) => return JobResponse::format_invalid("execute", &err.to_string()),
         Ok(d) => d,
     };
 
@@ -550,6 +554,8 @@ fn handle_parent_process(
             Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv })
         },
         Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)),
+        Ok(JobResponse::RuntimeConstruction(err)) =>
+            Ok(WorkerResponse::RuntimeConstruction(err)),
         Err(job_error) => {
             gum::warn!(
                 target: LOG_TARGET,
diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs
index 78dfe71adaddcd8d917a639591c102e08ebb7e4b..6288755526d497812027f7bf04e11e5a67ab799e 100644
--- a/polkadot/node/core/pvf/src/artifacts.rs
+++ b/polkadot/node/core/pvf/src/artifacts.rs
@@ -238,6 +238,14 @@ impl Artifacts {
             .is_none());
     }
 
+    /// Removes an artifact by its id.
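+    ///
+    /// Returns the id and path of the removed artifact if it was in the `Prepared`
+    /// state; artifacts in any other state have no file tracked on disk, so `None`
+    /// is returned for them.
+    ///
+    /// A minimal sketch of the intended host-side usage, mirroring
+    /// `handle_artifact_removal` in `host.rs` (the `sweeper_tx` channel there feeds
+    /// the file-deletion task):
+    ///
+    /// ```ignore
+    /// if let Some((_id, path)) = artifacts.remove(artifact_id) {
+    ///     // Hand the on-disk file over to the sweeper task for deletion.
+    ///     sweeper_tx.send(path).await.map_err(|_| Fatal)?;
+    /// }
+    /// ```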
+    pub fn remove(&mut self, artifact_id: ArtifactId) -> Option<(ArtifactId, PathBuf)> {
+        self.inner.remove(&artifact_id).and_then(|state| match state {
+            ArtifactState::Prepared { path, .. } => Some((artifact_id, path)),
+            _ => None,
+        })
+    }
+
     /// Remove artifacts older than the given TTL and return id and path of the removed ones.
     pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<(ArtifactId, PathBuf)> {
         let now = SystemTime::now();
diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs
index 80d41d5c64be1a5cec31aee084a0f536ad0380bf..8dc96305eadb8f3ced1231889f5fa6c5ffaeac57 100644
--- a/polkadot/node/core/pvf/src/error.rs
+++ b/polkadot/node/core/pvf/src/error.rs
@@ -86,6 +86,10 @@ pub enum PossiblyInvalidError {
     /// vote invalid.
     #[error("possibly invalid: job error: {0}")]
     JobError(String),
+    /// Instantiation of the WASM module instance failed during an execution.
+    /// Possibly caused by a local issue or an unclean node update. May be retried
+    /// with re-preparation.
+    #[error("possibly invalid: runtime construction: {0}")]
+    RuntimeConstruction(String),
 }
 
 impl From<InternalValidationError> for ValidationError {
diff --git a/polkadot/node/core/pvf/src/execute/mod.rs b/polkadot/node/core/pvf/src/execute/mod.rs
index c6d9cf90fa289601f89b0aad307483a002f89427..365e98196cae29dcb9c5085708cfaf7ee8b26238 100644
--- a/polkadot/node/core/pvf/src/execute/mod.rs
+++ b/polkadot/node/core/pvf/src/execute/mod.rs
@@ -23,4 +23,4 @@
 mod queue;
 mod worker_interface;
 
-pub use queue::{start, PendingExecutionRequest, ToQueue};
+pub use queue::{start, FromQueue, PendingExecutionRequest, ToQueue};
diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs
index aa91d11781fc625993484fa64ff8ebca4d65aeb2..bdc3c7327b06079eaaeebf737ffa7ca2c8c8270c 100644
--- a/polkadot/node/core/pvf/src/execute/queue.rs
+++ b/polkadot/node/core/pvf/src/execute/queue.rs
@@ -25,7 +25,7 @@ use crate::{
     InvalidCandidate, PossiblyInvalidError, ValidationError, LOG_TARGET,
 };
 use futures::{
-    channel::mpsc,
+    channel::{mpsc, oneshot},
     future::BoxFuture,
     stream::{FuturesUnordered, StreamExt as _},
     Future, FutureExt,
@@ -54,6 +54,12 @@ pub enum ToQueue {
     Enqueue { artifact: ArtifactPathId, pending_execution_request: PendingExecutionRequest },
 }
 
+/// A response from the queue.
+#[derive(Debug)]
+pub enum FromQueue {
+    RemoveArtifact { artifact: ArtifactId, reply_to: oneshot::Sender<()> },
+}
+
 /// An execution request that should execute the PVF (known in the context) and send the results
 /// to the given result sender.
 #[derive(Debug)]
@@ -137,6 +143,8 @@ struct Queue {
     /// The receiver that receives messages to the pool.
     to_queue_rx: mpsc::Receiver<ToQueue>,
+    /// The sender to send messages back to the validation host.
+    from_queue_tx: mpsc::UnboundedSender<FromQueue>,
 
     // Some variables related to the current session.
     program_path: PathBuf,
@@ -161,6 +169,7 @@ impl Queue {
         node_version: Option<String>,
         security_status: SecurityStatus,
         to_queue_rx: mpsc::Receiver<ToQueue>,
+        from_queue_tx: mpsc::UnboundedSender<FromQueue>,
     ) -> Self {
         Self {
             metrics,
@@ -170,6 +179,7 @@ impl Queue {
             node_version,
             security_status,
             to_queue_rx,
+            from_queue_tx,
             queue: VecDeque::new(),
             mux: Mux::new(),
             workers: Workers {
@@ -301,7 +311,7 @@ async fn handle_mux(queue: &mut Queue, event: QueueEvent) {
             handle_worker_spawned(queue, idle, handle, job);
         },
         QueueEvent::StartWork(worker, outcome, artifact_id, result_tx) => {
-            handle_job_finish(queue, worker, outcome, artifact_id, result_tx);
+            handle_job_finish(queue, worker, outcome, artifact_id, result_tx).await;
         },
     }
 }
@@ -327,42 +337,69 @@ fn handle_worker_spawned(
 
 /// If there are pending jobs in the queue, schedules the next of them onto the just freed up
 /// worker. Otherwise, puts back into the available workers list.
-fn handle_job_finish(
+async fn handle_job_finish(
     queue: &mut Queue,
     worker: Worker,
     outcome: Outcome,
     artifact_id: ArtifactId,
     result_tx: ResultSender,
 ) {
-    let (idle_worker, result, duration) = match outcome {
+    let (idle_worker, result, duration, sync_channel) = match outcome {
         Outcome::Ok { result_descriptor, duration, idle_worker } => {
             // TODO: propagate the soft timeout
-            (Some(idle_worker), Ok(result_descriptor), Some(duration))
+            (Some(idle_worker), Ok(result_descriptor), Some(duration), None)
         },
         Outcome::InvalidCandidate { err, idle_worker } => (
             Some(idle_worker),
             Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))),
             None,
+            None,
         ),
-        Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None),
+        Outcome::RuntimeConstruction { err, idle_worker } => {
+            // The task for artifact removal is executed concurrently with
+            // sending the execution result to the host.
+            let (result_tx, result_rx) = oneshot::channel();
+            queue
+                .from_queue_tx
+                .unbounded_send(FromQueue::RemoveArtifact {
+                    artifact: artifact_id.clone(),
+                    reply_to: result_tx,
+                })
+                .expect("from execute queue receiver is listened to by the host; qed");
+            (
+                Some(idle_worker),
+                Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction(
+                    err,
+                ))),
+                None,
+                Some(result_rx),
+            )
+        },
+        Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None, None),
         // Either the worker or the job timed out. Kill the worker in either case. Treated as
         // definitely-invalid, because if we timed out, there's no time left for a retry.
         Outcome::HardTimeout =>
-            (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None),
+            (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None, None),
         // "Maybe invalid" errors (will retry).
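        // All three outcomes below map to `PossiblyInvalidError`, which the validation
        // host reports as retriable, unlike the definite verdicts above (hard timeout,
        // worker-reported invalid).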
         Outcome::WorkerIntfErr => (
             None,
             Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)),
             None,
+            None,
         ),
         Outcome::JobDied { err } => (
             None,
             Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))),
             None,
+            None,
+        ),
+        Outcome::JobError { err } => (
+            None,
+            Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))),
+            None,
+            None,
         ),
-        Outcome::JobError { err } =>
-            (None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), None),
     };
 
     queue.metrics.execute_finished();
@@ -386,6 +423,12 @@ fn handle_job_finish(
         );
     }
 
+    if let Some(sync_channel) = sync_channel {
+        // An error means the sender was dropped, i.e. the artifact was already
+        // removed from the cache, so it is legitimate to ignore the result.
+        let _ = sync_channel.await;
+    }
+
     // First we send the result. It may fail due to the other end of the channel being dropped,
     // that's legitimate and we don't treat that as an error.
     let _ = result_tx.send(result);
@@ -521,8 +564,10 @@ pub fn start(
     spawn_timeout: Duration,
     node_version: Option<String>,
     security_status: SecurityStatus,
-) -> (mpsc::Sender<ToQueue>, impl Future<Output = ()>) {
+) -> (mpsc::Sender<ToQueue>, mpsc::UnboundedReceiver<FromQueue>, impl Future<Output = ()>) {
     let (to_queue_tx, to_queue_rx) = mpsc::channel(20);
+    let (from_queue_tx, from_queue_rx) = mpsc::unbounded();
+
     let run = Queue::new(
         metrics,
         program_path,
@@ -532,7 +577,8 @@ pub fn start(
         node_version,
         security_status,
         to_queue_rx,
+        from_queue_tx,
     )
     .run();
-    (to_queue_tx, run)
+    (to_queue_tx, from_queue_rx, run)
 }
diff --git a/polkadot/node/core/pvf/src/execute/worker_interface.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs
index 9f7738f00e699ab981d7fa4396fcd09d5e1a4abe..db81da118d7b5563ea7b4bec1b6c76bd3bf842e2 100644
--- a/polkadot/node/core/pvf/src/execute/worker_interface.rs
+++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs
@@ -87,6 +87,10 @@ pub enum Outcome {
     /// a trap. Errors related to the preparation process are not expected to be encountered by the
     /// execution workers.
     InvalidCandidate { err: String, idle_worker: IdleWorker },
+    /// The error is probably transient. It may happen, for example, because the artifact
+    /// was prepared with a different Wasmtime version than the one in the current
+    /// execution environment.
+    RuntimeConstruction { err: String, idle_worker: IdleWorker },
     /// The execution time exceeded the hard limit. The worker is terminated.
     HardTimeout,
     /// An I/O error happened during communication with the worker. This may mean that the worker
@@ -193,6 +197,10 @@ pub async fn start_work(
             err,
             idle_worker: IdleWorker { stream, pid, worker_dir },
         },
+        WorkerResponse::RuntimeConstruction(err) => Outcome::RuntimeConstruction {
+            err,
+            idle_worker: IdleWorker { stream, pid, worker_dir },
+        },
         WorkerResponse::JobTimedOut => Outcome::HardTimeout,
         WorkerResponse::JobDied { err, job_pid: _ } => Outcome::JobDied { err },
         WorkerResponse::JobError(err) => Outcome::JobError { err },
diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs
index ae9fdc7d2dea81ac85c8c4ad752fca6fbf9a6f55..8ec46f4b08f193082f1fcf8f6a08cae2ef261765 100644
--- a/polkadot/node/core/pvf/src/host.rs
+++ b/polkadot/node/core/pvf/src/host.rs
@@ -274,7 +274,7 @@ pub async fn start(
         from_prepare_pool,
     );
 
-    let (to_execute_queue_tx, run_execute_queue) = execute::start(
+    let (to_execute_queue_tx, from_execute_queue_rx, run_execute_queue) = execute::start(
         metrics,
         config.execute_worker_program_path.to_owned(),
         config.cache_path.clone(),
@@ -296,6 +296,7 @@ pub async fn start(
         to_prepare_queue_tx,
         from_prepare_queue_rx,
         to_execute_queue_tx,
+        from_execute_queue_rx,
         to_sweeper_tx,
         awaiting_prepare: AwaitingPrepare::default(),
     })
@@ -342,6 +343,8 @@ struct Inner {
     from_prepare_queue_rx: mpsc::UnboundedReceiver<prepare::FromQueue>,
 
     to_execute_queue_tx: mpsc::Sender<execute::ToQueue>,
+    from_execute_queue_rx: mpsc::UnboundedReceiver<execute::FromQueue>,
+
     to_sweeper_tx: mpsc::Sender<PathBuf>,
 
     awaiting_prepare: AwaitingPrepare,
@@ -358,6 +361,7 @@ async fn run(
         to_host_rx,
         from_prepare_queue_rx,
         mut to_prepare_queue_tx,
+        from_execute_queue_rx,
         mut to_execute_queue_tx,
         mut to_sweeper_tx,
         mut awaiting_prepare,
@@ -384,10 +388,21 @@ async fn run(
     let mut to_host_rx = to_host_rx.fuse();
     let mut from_prepare_queue_rx = from_prepare_queue_rx.fuse();
+    let mut from_execute_queue_rx = from_execute_queue_rx.fuse();
 
     loop {
         // biased to make it behave deterministically for tests.
         futures::select_biased! {
+            from_execute_queue_rx = from_execute_queue_rx.next() => {
+                let from_queue = break_if_fatal!(from_execute_queue_rx.ok_or(Fatal));
+                let execute::FromQueue::RemoveArtifact { artifact, reply_to } = from_queue;
+                break_if_fatal!(handle_artifact_removal(
+                    &mut to_sweeper_tx,
+                    &mut artifacts,
+                    artifact,
+                    reply_to,
+                ).await);
+            },
             () = cleanup_pulse.select_next_some() => {
                 // `select_next_some` because we don't expect this to fail, but if it does, we
                 // still don't fail. The trade-off is that the compiled cache will start growing
@@ -861,6 +876,37 @@ async fn handle_cleanup_pulse(
     Ok(())
 }
 
+async fn handle_artifact_removal(
+    sweeper_tx: &mut mpsc::Sender<PathBuf>,
+    artifacts: &mut Artifacts,
+    artifact_id: ArtifactId,
+    reply_to: oneshot::Sender<()>,
+) -> Result<(), Fatal> {
+    let (artifact_id, path) = if let Some(artifact) = artifacts.remove(artifact_id) {
+        artifact
+    } else {
+        // If the artifact was not found by its id, it has most probably already been
+        // removed; given the randomness of artifact names, this is safe to ignore.
+        return Ok(());
+    };
+    reply_to
+        .send(())
+        .expect("the execute queue waits for the artifact removal confirmation; qed");
+    // Thanks to the randomness of the artifact name (see
+    // `artifacts::generate_artifact_path`) there is no issue with any name conflict on
+    // future repreparation.
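+    // In particular, a re-prepared artifact gets a fresh randomized file name, so it
+    // can never collide with the old file, even while the sweeper is still deleting
+    // that file in the background.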
+    // That is why the removal can safely be confirmed right away.
+    gum::debug!(
+        target: LOG_TARGET,
+        validation_code_hash = ?artifact_id.code_hash,
+        "PVF pruning: pruning artifact by request from the execute queue",
+    );
+    sweeper_tx.send(path).await.map_err(|_| Fatal)?;
+    Ok(())
+}
+
 /// A simple task which sole purpose is to delete files thrown at it.
 async fn sweeper_task(mut sweeper_rx: mpsc::Receiver<PathBuf>) {
     loop {
@@ -968,6 +1014,8 @@ pub(crate) mod tests {
         to_prepare_queue_rx: mpsc::Receiver<prepare::ToQueue>,
         from_prepare_queue_tx: mpsc::UnboundedSender<prepare::FromQueue>,
         to_execute_queue_rx: mpsc::Receiver<execute::ToQueue>,
+        #[allow(unused)]
+        from_execute_queue_tx: mpsc::UnboundedSender<execute::FromQueue>,
         to_sweeper_rx: mpsc::Receiver<PathBuf>,
 
         run: BoxFuture<'static, ()>,
@@ -979,6 +1027,7 @@ pub(crate) mod tests {
         let (to_prepare_queue_tx, to_prepare_queue_rx) = mpsc::channel(10);
         let (from_prepare_queue_tx, from_prepare_queue_rx) = mpsc::unbounded();
         let (to_execute_queue_tx, to_execute_queue_rx) = mpsc::channel(10);
+        let (from_execute_queue_tx, from_execute_queue_rx) = mpsc::unbounded();
         let (to_sweeper_tx, to_sweeper_rx) = mpsc::channel(10);
 
         let run = run(Inner {
@@ -989,6 +1038,7 @@
             to_prepare_queue_tx,
             from_prepare_queue_rx,
             to_execute_queue_tx,
+            from_execute_queue_rx,
             to_sweeper_tx,
             awaiting_prepare: AwaitingPrepare::default(),
         })
@@ -999,6 +1049,7 @@
             to_prepare_queue_rx,
             from_prepare_queue_tx,
             to_execute_queue_rx,
+            from_execute_queue_tx,
             to_sweeper_rx,
             run,
         }
diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs
index bcc10749e746d05479997e161329bbc6b4c468ff..cdfbcd8e57855121a7128a0ed616df4f8ecb0cbd 100644
--- a/polkadot/node/core/pvf/tests/it/main.rs
+++ b/polkadot/node/core/pvf/tests/it/main.rs
@@ -21,13 +21,14 @@ use parity_scale_codec::Encode as _;
 #[cfg(all(feature = "ci-only-tests", target_os = "linux"))]
 use polkadot_node_core_pvf::SecurityStatus;
 use polkadot_node_core_pvf::{
-    start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError,
-    PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR,
+    start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics,
+    PossiblyInvalidError, PrepareError, PrepareJobKind, PvfPrepData, ValidationError,
+    ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR,
 };
 use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult};
 use polkadot_primitives::{ExecutorParam, ExecutorParams};
 
-use std::time::Duration;
+use std::{io::Write, time::Duration};
 use tokio::sync::Mutex;
 
 mod adder;
@@ -352,10 +353,80 @@ async fn deleting_prepared_artifact_does_not_dispute() {
     )
     .await;
 
-    match result {
-        Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {},
-        r => panic!("{:?}", r),
+    assert_matches!(result, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)));
+}
+
+// Test that corruption of a prepared artifact does not lead to a dispute when we try to execute it.
+#[tokio::test]
+async fn corrupted_prepared_artifact_does_not_dispute() {
+    let host = TestHost::new().await;
+    let cache_dir = host.cache_dir.path();
+
+    let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap();
+
+    // Manually corrupt the prepared artifact on disk. The in-memory artifacts table
+    // won't change.
+    let artifact_path = {
+        // Get the artifact path (asserting it exists).
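+        // The artifact file name is randomized, so the test discovers the file by
+        // scanning the cache dir rather than computing its path directly.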
+        let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect();
+        // Should contain the artifact and the worker dir.
+        assert_eq!(cache_dir.len(), 2);
+        let mut artifact_path = cache_dir.pop().unwrap().unwrap();
+        if artifact_path.path().is_dir() {
+            artifact_path = cache_dir.pop().unwrap().unwrap();
+        }
+
+        // Corrupt the artifact.
+        let mut f = std::fs::OpenOptions::new()
+            .write(true)
+            .truncate(true)
+            .open(artifact_path.path())
+            .unwrap();
+        f.write_all(b"corrupted wasm").unwrap();
+        f.flush().unwrap();
+        artifact_path
+    };
+
+    assert!(artifact_path.path().exists());
+
+    // Try to validate; the artifact should get removed because of the corruption.
+    let result = host
+        .validate_candidate(
+            halt::wasm_binary_unwrap(),
+            ValidationParams {
+                block_data: BlockData(Vec::new()),
+                parent_head: Default::default(),
+                relay_parent_number: 1,
+                relay_parent_storage_root: Default::default(),
+            },
+            Default::default(),
+        )
+        .await;
+
+    assert_matches!(
+        result,
+        Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction(_)))
+    );
+
+    // Because the error is `RuntimeConstruction`, we may retry.
+    host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap();
+
+    // The actual artifact removal is done concurrently with sending the execution
+    // result; this is not a problem for further re-preparation, as artifact
+    // filenames are random.
+    for _ in 1..5 {
+        if !artifact_path.path().exists() {
+            break;
+        }
+        tokio::time::sleep(Duration::from_secs(1)).await;
     }
+
+    assert!(
+        !artifact_path.path().exists(),
+        "the corrupted artifact ({}) should be deleted by the host",
+        artifact_path.path().display()
+    );
 }
 
 #[tokio::test]
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index 4bedfd827340bc60b0101f1c854f207705bc0b31..3ff1a35d0687b6c6dc2d0ae70c96caeefc727c8f 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -433,6 +433,7 @@ where
     .unwrap_or_else(|e| {
         gum::warn!(
             target: LOG_TARGET,
+            api = ?stringify!($api_name),
             "cannot query the runtime API version: {}",
             e,
         );
diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml
index 4c81c8314df94b06ec6a70d0d06442e23fae3b4b..70126b4f43367ce11a1a23d462392b513ca1c028 100644
--- a/polkadot/node/gum/proc-macro/Cargo.toml
+++ b/polkadot/node/gum/proc-macro/Cargo.toml
@@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 
 [dependencies]
-syn = { version = "2.0.49", features = ["extra-traits", "full"] }
-quote = "1.0.28"
+syn = { features = ["extra-traits", "full"], workspace = true }
+quote = { workspace = true }
 proc-macro2 = "1.0.56"
 proc-macro-crate = "3.0.0"
 expander = "2.0.0"
diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml
index c8fd3d462834552e86c78a2ebb77c637b467bb4f..23ab8f8421084751fa61d48bd3aa2b346e5a75c1 100644
--- a/polkadot/node/jaeger/Cargo.toml
+++ b/polkadot/node/jaeger/Cargo.toml
@@ -17,7 +17,7 @@ polkadot-primitives = { path = "../../primitives" }
 polkadot-node-primitives = { path = "../primitives" }
 sc-network = { path = "../../../substrate/client/network" }
 sp-core = { path = "../../../substrate/primitives/core" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 tokio = "1.24.2"
 log = { workspace = true, default-features = true }
 parity-scale-codec = { version = "3.6.1", default-features = false }
diff --git a/polkadot/node/malus/src/variants/back_garbage_candidate.rs b/polkadot/node/malus/src/variants/back_garbage_candidate.rs
index 82475d291422582f5197613d4f347baaf203b929..b939a2151e2359828da74d317c9e2bc0af2c6e0c 100644
--- a/polkadot/node/malus/src/variants/back_garbage_candidate.rs
+++ b/polkadot/node/malus/src/variants/back_garbage_candidate.rs
@@ -20,14 +20,13 @@
 
 use polkadot_cli::{
     service::{
-        AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs,
-        HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle,
-        ParachainHost, ProvideRuntimeApi,
+        AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen,
+        OverseerGenArgs, OverseerHandle,
     },
     validator_overseer_builder, Cli,
 };
 use polkadot_node_subsystem::SpawnGlue;
-use polkadot_node_subsystem_types::DefaultSubsystemClient;
+use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient};
 use sp_core::traits::SpawnNamed;
 
 use crate::{
@@ -63,13 +62,9 @@ impl OverseerGen for BackGarbageCandidates {
         connector: OverseerConnector,
         args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
         ext_args: Option<ExtendedOverseerGenArgs>,
-    ) -> Result<
-        (Overseer<SpawnGlue<Spawner>, Arc<DefaultSubsystemClient<RuntimeClient>>>, OverseerHandle),
-        Error,
-    >
+    ) -> Result<(Overseer<SpawnGlue<Spawner>, Arc<RuntimeClient>>, OverseerHandle), Error>
     where
-        RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
-        RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+        RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static,
         Spawner: 'static + SpawnNamed + Clone + Unpin,
     {
         let spawner = args.spawner.clone();
diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs
index 011fcc80e37331909434ccd2fa1bfa1e96f48f35..eb6988f81811687442a36ae3f8ec225de48dd9df 100644
--- a/polkadot/node/malus/src/variants/common.rs
+++ b/polkadot/node/malus/src/variants/common.rs
@@ -23,10 +23,6 @@ use crate::{
 
 use polkadot_node_core_candidate_validation::find_validation_data;
 use polkadot_node_primitives::{InvalidCandidate, ValidationResult};
-use polkadot_node_subsystem::{
-    messages::{CandidateValidationMessage, ValidationFailed},
-    overseer,
-};
 
 use polkadot_primitives::{
     CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData,
diff --git a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs
index b3555ba2f5beb3e45285bc443dee9a4d029b070b..7a95bdaead26e204bd4cd8b386ae02b5e12297f9 100644
--- a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs
+++ b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs
@@ -34,14 +34,13 @@ use futures::channel::oneshot;
 
 use polkadot_cli::{
     service::{
-        AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs,
-        HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle,
-        ParachainHost, ProvideRuntimeApi,
+        AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen,
+        OverseerGenArgs, OverseerHandle,
     },
     validator_overseer_builder, Cli,
 };
-use polkadot_node_subsystem::{messages::ApprovalVotingMessage, SpawnGlue};
-use polkadot_node_subsystem_types::{DefaultSubsystemClient, OverseerSignal};
+use polkadot_node_subsystem::SpawnGlue;
+use polkadot_node_subsystem_types::{ChainApiBackend, OverseerSignal, RuntimeApiSubsystemClient};
 use polkadot_node_subsystem_util::request_candidate_events;
 use polkadot_primitives::CandidateEvent;
 use sp_core::traits::SpawnNamed;
@@ -237,13 +236,9 @@ impl OverseerGen for DisputeFinalizedCandidates {
         connector: OverseerConnector,
         args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
         ext_args: Option<ExtendedOverseerGenArgs>,
-    ) -> Result<
-        (Overseer<SpawnGlue<Spawner>, Arc<DefaultSubsystemClient<RuntimeClient>>>, OverseerHandle),
-        Error,
-    >
+    ) -> Result<(Overseer<SpawnGlue<Spawner>, Arc<RuntimeClient>>, OverseerHandle), Error>
     where
-        RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
-        RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+        RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static,
         Spawner: 'static + SpawnNamed + Clone + Unpin,
     {
         gum::info!(
diff --git a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs
index b9812cbb5012a4ff97e3f1ed146538147efda03b..a50fdce16e4ec41d6b9bea4f0190616f756d9193 100644
--- a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs
+++ b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs
@@ -24,14 +24,13 @@
 
 use polkadot_cli::{
     service::{
-        AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs,
-        HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle,
-        ParachainHost, ProvideRuntimeApi,
+        AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen,
+        OverseerGenArgs, OverseerHandle,
     },
     validator_overseer_builder, Cli,
 };
 use polkadot_node_subsystem::SpawnGlue;
-use polkadot_node_subsystem_types::DefaultSubsystemClient;
+use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient};
 use sp_core::traits::SpawnNamed;
 
 // Filter wrapping related types.
@@ -80,13 +79,9 @@ impl OverseerGen for DisputeValidCandidates {
         connector: OverseerConnector,
         args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
         ext_args: Option<ExtendedOverseerGenArgs>,
-    ) -> Result<
-        (Overseer<SpawnGlue<Spawner>, Arc<DefaultSubsystemClient<RuntimeClient>>>, OverseerHandle),
-        Error,
-    >
+    ) -> Result<(Overseer<SpawnGlue<Spawner>, Arc<RuntimeClient>>, OverseerHandle), Error>
     where
-        RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
-        RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+        RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static,
         Spawner: 'static + SpawnNamed + Clone + Unpin,
     {
         let spawner = args.spawner.clone();
diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs
index 22b44ddd1dc359620b199118bebf0bb83b69dada..739ed40db362a2ae330f14b7a24cc77c1ab76159 100644
--- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs
+++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs
@@ -25,14 +25,13 @@ use futures::channel::oneshot;
 
 use polkadot_cli::{
     service::{
-        AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs,
-        HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle,
-        ParachainHost, ProvideRuntimeApi,
+        AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen,
+        OverseerGenArgs, OverseerHandle,
     },
     validator_overseer_builder, Cli,
 };
 use polkadot_node_primitives::{AvailableData, BlockData, PoV};
-use polkadot_node_subsystem_types::DefaultSubsystemClient;
+use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient};
 use polkadot_primitives::{CandidateDescriptor, CandidateReceipt};
 
 use polkadot_node_subsystem_util::request_validators;
@@ -52,7 +51,7 @@ use crate::{
 
 // Import extra types relevant to the particular
 // subsystem.
-use polkadot_node_subsystem::{messages::CandidateBackingMessage, SpawnGlue};
+use polkadot_node_subsystem::SpawnGlue;
 
 use std::sync::Arc;
 
@@ -296,13 +295,9 @@ impl OverseerGen for SuggestGarbageCandidates {
         connector: OverseerConnector,
         args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
         ext_args: Option<ExtendedOverseerGenArgs>,
-    ) -> Result<
-        (Overseer<SpawnGlue<Spawner>, Arc<DefaultSubsystemClient<RuntimeClient>>>, OverseerHandle),
-        Error,
-    >
+    ) -> Result<(Overseer<SpawnGlue<Spawner>, Arc<RuntimeClient>>, OverseerHandle), Error>
     where
-        RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
-        RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+        RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static,
         Spawner: 'static + SpawnNamed + Clone + Unpin,
     {
         gum::info!(
diff --git a/polkadot/node/malus/src/variants/support_disabled.rs b/polkadot/node/malus/src/variants/support_disabled.rs
index e25df56fd643cdde44b861973751b1cd23e25480..169c442db25b7426f44cdfbe2c4181233802a914 100644
--- a/polkadot/node/malus/src/variants/support_disabled.rs
+++ b/polkadot/node/malus/src/variants/support_disabled.rs
@@ -19,14 +19,13 @@
 
 use polkadot_cli::{
     service::{
-        AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs,
-        HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle,
-        ParachainHost, ProvideRuntimeApi,
+        AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen,
+        OverseerGenArgs, OverseerHandle,
     },
     validator_overseer_builder, Cli,
 };
 use polkadot_node_subsystem::SpawnGlue;
-use polkadot_node_subsystem_types::DefaultSubsystemClient;
+use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient};
 use sp_core::traits::SpawnNamed;
 
 use crate::interceptor::*;
@@ -50,13 +49,9 @@ impl OverseerGen for SupportDisabled {
         connector: OverseerConnector,
         args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
         ext_args: Option<ExtendedOverseerGenArgs>,
-    ) -> Result<
-        (Overseer<SpawnGlue<Spawner>, Arc<DefaultSubsystemClient<RuntimeClient>>>, OverseerHandle),
-        Error,
-    >
+    ) -> Result<(Overseer<SpawnGlue<Spawner>, Arc<RuntimeClient>>, OverseerHandle), Error>
     where
-        RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
-        RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+        RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static,
         Spawner: 'static + SpawnNamed + Clone + Unpin,
     {
         validator_overseer_builder(
diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml
index a1484429ed633a09974e230bcf5be118f7280e15..182d92cb163179eea85cafeb1c605763ac18cde1 100644
--- a/polkadot/node/network/availability-distribution/Cargo.toml
+++ b/polkadot/node/network/availability-distribution/Cargo.toml
@@ -21,7 +21,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 polkadot-node-primitives = { path = "../../primitives" }
 sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] }
 sp-keystore = { path = "../../../../substrate/primitives/keystore" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 rand = "0.8.5"
 derive_more = "0.99.17"
 schnellru = "0.2.1"
@@ -36,3 +36,14 @@ sc-network = { path = "../../../../substrate/client/network" }
 futures-timer = "3.0.2"
 assert_matches = "1.4.0"
 polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" }
+polkadot-subsystem-bench = { path = "../../subsystem-bench" }
+
+
+[[test]]
+name = "availability-distribution-regression-bench"
+path = "tests/availability-distribution-regression-bench.rs"
+harness = false
+required-features = ["subsystem-benchmarks"]
+
+[features]
+subsystem-benchmarks = []
diff --git a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7a490d5e47f74103634e2ed0266e5230a3842b8b
--- /dev/null
+++ b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs
@@ -0,0 +1,104 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! availability-write regression tests
+//!
+//! Availability write benchmark based on Kusama parameters and scale.
+//!
+//! Subsystems involved:
+//! - availability-distribution
+//! - bitfield-distribution
+//! - availability-store
+
+use polkadot_subsystem_bench::{
+    availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState},
+    configuration::TestConfiguration,
+    usage::BenchmarkUsage,
+};
+
+const BENCH_COUNT: usize = 3;
+const WARM_UP_COUNT: usize = 30;
+const WARM_UP_PRECISION: f64 = 0.01;
+
+fn main() -> Result<(), String> {
+    let mut messages = vec![];
+    let mut config = TestConfiguration::default();
+    // A single node's effort is roughly n_cores * needed_approvals / n_validators =
+    // 60 * 30 / 300 = 6 cores.
+    config.n_cores = 6;
+    config.num_blocks = 3;
+    config.generate_pov_sizes();
+
+    warm_up(config.clone())?;
+    let usage = benchmark(config.clone());
+
+    messages.extend(usage.check_network_usage(&[
+        ("Received from peers", 4330.0, 0.05),
+        ("Sent to peers", 15900.0, 0.05),
+    ]));
+    messages.extend(usage.check_cpu_usage(&[
+        ("availability-distribution", 0.025, 0.05),
+        ("bitfield-distribution", 0.085, 0.05),
+        ("availability-store", 0.180, 0.05),
+    ]));
+
+    if messages.is_empty() {
+        Ok(())
+    } else {
+        eprintln!("{}", messages.join("\n"));
+        Err("Regressions found".to_string())
+    }
+}
+
+fn warm_up(config: TestConfiguration) -> Result<(), String> {
+    println!("Warming up...");
+    let mut prev_run: Option<BenchmarkUsage> = None;
+    for _ in 0..WARM_UP_COUNT {
+        let curr = run(config.clone());
+        if let Some(ref prev) = prev_run {
+            let av_distr_diff =
+                curr.cpu_usage_diff(prev, "availability-distribution").expect("Must exist");
+            let bitf_distr_diff =
+                curr.cpu_usage_diff(prev, "bitfield-distribution").expect("Must exist");
+            let av_store_diff =
+                curr.cpu_usage_diff(prev, "availability-store").expect("Must exist");
+            if av_distr_diff < WARM_UP_PRECISION &&
+                bitf_distr_diff < WARM_UP_PRECISION &&
+                av_store_diff < WARM_UP_PRECISION
+            {
+                return Ok(())
+            }
+        }
+        prev_run = Some(curr);
+    }
+
+    Err("Can't warm up".to_string())
+}
+
+fn benchmark(config: TestConfiguration) -> BenchmarkUsage {
+    println!("Benchmarking...");
+    let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT).map(|_| run(config.clone())).collect();
+    let usage = BenchmarkUsage::average(&usages);
+    println!("{}", usage);
+    usage
+}
+
+fn run(config: TestConfiguration) -> BenchmarkUsage {
+    let mut state = TestState::new(&config);
+    let (mut env, _protocol_config) =
+        prepare_test(config.clone(), &mut state, TestDataAvailability::Write, false);
+    env.runtime()
+        .block_on(benchmark_availability_write("data_availability_write", &mut env, state))
+}
diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml
index e86445730ffcf9e8006d806aa5ea833eb46b441a..12b6ce7a05715480bc6ca79d67742bc089b5f67c 100644
--- a/polkadot/node/network/availability-recovery/Cargo.toml
+++ b/polkadot/node/network/availability-recovery/Cargo.toml
@@ -15,7 +15,7 @@ tokio = "1.24.2"
 schnellru = "0.2.1"
 rand = "0.8.5"
 fatality = "0.0.6"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 async-trait = "0.1.74"
 gum = { package = "tracing-gum", path = "../../gum" }
 
@@ -41,6 +41,13 @@ sc-network = { path = "../../../../substrate/client/network" }
 
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
 polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" }
+polkadot-subsystem-bench = { path = "../../subsystem-bench" }
+
+[[test]]
+name = "availability-recovery-regression-bench"
+path = "tests/availability-recovery-regression-bench.rs"
+harness = false
+required-features = ["subsystem-benchmarks"]
 
 [features]
 subsystem-benchmarks = []
diff --git a/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs
new file mode 100644
index 0000000000000000000000000000000000000000..30cc4d47ecc13048cd4326625a2156d7bbc4ddc9
--- /dev/null
+++ b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs
@@ -0,0 +1,94 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! availability-read regression tests
+//!
+//! Availability read benchmark based on Kusama parameters and scale.
+//!
+//! Subsystems involved:
+//! - availability-recovery
+
+use polkadot_subsystem_bench::{
+    availability::{
+        benchmark_availability_read, prepare_test, DataAvailabilityReadOptions,
+        TestDataAvailability, TestState,
+    },
+    configuration::TestConfiguration,
+    usage::BenchmarkUsage,
+};
+
+const BENCH_COUNT: usize = 3;
+const WARM_UP_COUNT: usize = 10;
+const WARM_UP_PRECISION: f64 = 0.01;
+
+fn main() -> Result<(), String> {
+    let mut messages = vec![];
+
+    let options = DataAvailabilityReadOptions { fetch_from_backers: true };
+    let mut config = TestConfiguration::default();
+    config.num_blocks = 3;
+    config.generate_pov_sizes();
+
+    warm_up(config.clone(), options.clone())?;
+    let usage = benchmark(config.clone(), options.clone());
+
+    messages.extend(usage.check_network_usage(&[
+        ("Received from peers", 102400.000, 0.05),
+        ("Sent to peers", 0.335, 0.05),
+    ]));
+    messages.extend(usage.check_cpu_usage(&[("availability-recovery", 3.850, 0.05)]));
+
+    if messages.is_empty() {
+        Ok(())
+    } else {
+        eprintln!("{}", messages.join("\n"));
+        Err("Regressions found".to_string())
+    }
+}
+
+fn warm_up(config: TestConfiguration, options: DataAvailabilityReadOptions) -> Result<(), String> {
+    println!("Warming up...");
+    let mut prev_run: Option<BenchmarkUsage> = None;
+    for _ in 0..WARM_UP_COUNT {
+        let curr = run(config.clone(), options.clone());
+        if let Some(ref prev) = prev_run {
+            let diff = curr.cpu_usage_diff(prev, "availability-recovery").expect("Must exist");
+            if diff < WARM_UP_PRECISION {
+                return Ok(())
+            }
+        }
+        prev_run = Some(curr);
+    }
+
+    Err("Can't warm up".to_string())
+}
+
+fn benchmark(config: TestConfiguration, options: DataAvailabilityReadOptions) -> BenchmarkUsage {
+    println!("Benchmarking...");
+    let usages: Vec<BenchmarkUsage> =
+        (0..BENCH_COUNT).map(|_| run(config.clone(), options.clone())).collect();
+    let usage = BenchmarkUsage::average(&usages);
+    println!("{}", usage);
+    usage
+}
+
+fn run(config: TestConfiguration, options: DataAvailabilityReadOptions) -> BenchmarkUsage {
+    let mut state = TestState::new(&config);
+    let (mut env, _protocol_config) =
+        prepare_test(config.clone(), &mut state, TestDataAvailability::Read(options), false);
+    env.runtime()
+        .block_on(benchmark_availability_read("data_availability_read", &mut env, state))
+}
diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml
index 0cdd7bfbcb136bef97f856dc5fd3cb0a3cdf507e..2e889fc30eb90063f07c05e34e51d863a3d32f59 100644
--- a/polkadot/node/network/bridge/Cargo.toml
+++ b/polkadot/node/network/bridge/Cargo.toml
@@ -25,7 +25,7 @@ polkadot-overseer = { path = "../../overseer" }
 parking_lot = "0.12.1"
 bytes = "1"
 fatality = "0.0.6"
-thiserror = "1"
+thiserror = { workspace = true }
 
 [dev-dependencies]
 assert_matches = "1.4.0"
diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml
index f88e8182eccad0db949117b4f89db589bf0785a8..f0f8be0f7bab240a928be9f30a18ca10b14eb16d 100644
--- a/polkadot/node/network/collator-protocol/Cargo.toml
+++ b/polkadot/node/network/collator-protocol/Cargo.toml
@@ -25,7 +25,7 @@ polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 polkadot-node-subsystem = { path = "../../subsystem" }
 fatality = "0.0.6"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 tokio-util = "0.7.1"
 
 [dev-dependencies]
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs
index 5b88efc99d8307594c5ac726d1af5ab141e4718d..1533f2eda5a57d332ea899e1dd81764f31c86dad 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs
@@ -90,8 +90,7 @@ impl ValidatorGroupsBuffer {
         }
     }
 
-    /// Returns discovery ids of validators we have at least one advertised-but-not-fetched
-    /// collation for.
+    /// Returns discovery ids of validators we are assigned to in this backing group window.
     pub fn validators_to_connect(&self) -> Vec<AuthorityDiscoveryId> {
         let validators_num = self.validators.len();
         let bits = self
@@ -99,11 +98,22 @@ impl ValidatorGroupsBuffer {
             .values()
             .fold(bitvec![0; validators_num], |acc, next| acc | next);
 
-        self.validators
+        let mut should_be_connected: Vec<AuthorityDiscoveryId> = self
+            .validators
             .iter()
             .enumerate()
             .filter_map(|(idx, authority_id)| bits[idx].then_some(authority_id.clone()))
-            .collect()
+            .collect();
+
+        if let Some(last_group) = self.group_infos.iter().last() {
+            for validator in self.validators.iter().rev().take(last_group.len) {
+                if !should_be_connected.contains(validator) {
+                    should_be_connected.push(validator.clone());
+                }
+            }
+        }
+
+        should_be_connected
     }
 
     /// Note a new advertisement, marking that we want to be connected to validators
@@ -279,7 +289,7 @@ mod tests {
         assert_eq!(buf.validators_to_connect(), validators[..2].to_vec());
 
         buf.reset_validator_interest(hash_a, &validators[1]);
-        assert_eq!(buf.validators_to_connect(), vec![validators[0].clone()]);
+        assert_eq!(buf.validators_to_connect(), validators[0..2].to_vec());
 
         buf.note_collation_advertised(hash_b, 0, GroupIndex(1), &validators[2..]);
         assert_eq!(buf.validators_to_connect(), validators[2..].to_vec());
@@ -287,7 +297,11 @@
         for validator in &validators[2..] {
             buf.reset_validator_interest(hash_b, validator);
         }
-        assert!(buf.validators_to_connect().is_empty());
+        let mut expected = validators[2..].to_vec();
+        expected.sort();
+        let mut result = buf.validators_to_connect();
+        result.sort();
+        assert_eq!(result, expected);
     }
 
     #[test]
@@ -320,10 +334,18 @@ mod tests {
         }
 
         buf.reset_validator_interest(hashes[1], &validators[0]);
-        assert_eq!(buf.validators_to_connect(), validators[..2].to_vec());
+        let mut expected: Vec<_> = validators[..4].iter().cloned().collect();
+        let mut result = buf.validators_to_connect();
+        expected.sort();
+        result.sort();
+        assert_eq!(result, expected);
 
         buf.reset_validator_interest(hashes[0], &validators[0]);
-        assert_eq!(buf.validators_to_connect(), vec![validators[1].clone()]);
+        let mut expected: Vec<_> = validators[1..4].iter().cloned().collect();
+        expected.sort();
+        let mut result = buf.validators_to_connect();
+        result.sort();
+        assert_eq!(result, expected);
 
         buf.note_collation_advertised(hashes[3], 0, GroupIndex(1), &validators[2..4]);
         buf.note_collation_advertised(
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index 48ad3c711a6db9c5e9e09c26d1a6290ffbed60f4..a1b93fff348f2f0621e9859418e5ceffb6829028 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -1883,7 +1883,7 @@ async fn disconnect_inactive_peers(
 ) {
     for (peer, peer_data) in peers {
         if peer_data.is_inactive(&eviction_policy) {
-            gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer");
+            gum::trace!(target: LOG_TARGET, ?peer, "Disconnecting inactive peer");
             disconnect_peer(sender, *peer).await;
         }
     }
diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml
index 319b743aa586dfb5104eb2b1fe9f3aa042c2c13f..14d59d04f2bff7b838717edb927401035537ff4d 100644
--- a/polkadot/node/network/dispute-distribution/Cargo.toml
+++ b/polkadot/node/network/dispute-distribution/Cargo.toml
@@ -24,7 +24,7 @@ polkadot-node-primitives = { path = "../../primitives" }
 sc-network = { path = "../../../../substrate/client/network" }
 sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" }
 sp-keystore = { path = "../../../../substrate/primitives/keystore" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 fatality = "0.0.6"
 schnellru = "0.2.1"
 indexmap = "2.0.0"
diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs
index 4dfdd1f7208f66ec4a787ffe8bc7f72c41575c9d..9f33cd5d8a31697f5d42030fc043643ef1ee247f 100644
--- a/polkadot/node/network/gossip-support/src/lib.rs
+++ b/polkadot/node/network/gossip-support/src/lib.rs
@@ -508,7 +508,7 @@ where
         );
     }
     let pretty = PrettyAuthorities(unconnected_authorities);
-    gum::debug!(
+    gum::info!(
         target: LOG_TARGET,
         ?connected_ratio,
         ?absolute_connected,
diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml
index a8b0cf2737eaa12ed3633aac8588ed6f19407ebf..7efa0b8ca820d651d77a53a95d61e7672c5f9737 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -21,7 +21,7 @@ sc-network = { path = "../../../../substrate/client/network" }
 sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" }
 strum = { version = "0.24", features = ["derive"] }
 futures = "0.3.21"
-thiserror = "1.0.48"
+thiserror = { workspace = true } fatality = "0.0.6" rand = "0.8" derive_more = "0.99" diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index b07d84390dcdd1e5413889c4749f187ce1ddf1ea..01f7d818c1c52ffd677379e3a0a9d2f543572d92 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -23,7 +23,7 @@ polkadot-node-network-protocol = { path = "../protocol" } arrayvec = "0.7.4" indexmap = "2.0.0" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -thiserror = "1.0.48" +thiserror = { workspace = true } fatality = "0.0.6" bitvec = "1" diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs index 619114de9670c8aef50ce129b6c26bf6ff206f70..c09916e56201f20a0fbbbac349681b512f1fd5fe 100644 --- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -55,8 +55,9 @@ //! and to keep track of what we have sent to other validators in the group and what we may //! continue to send them. -use polkadot_primitives::{CandidateHash, CompactStatement, ValidatorIndex}; +use polkadot_primitives::{CandidateHash, CompactStatement, Hash, ValidatorIndex}; +use crate::LOG_TARGET; use std::collections::{HashMap, HashSet}; #[derive(Hash, PartialEq, Eq)] @@ -424,6 +425,28 @@ impl ClusterTracker { fn is_in_group(&self, validator: ValidatorIndex) -> bool { self.validators.contains(&validator) } + + /// Dumps pending statements for this cluster. + /// + /// Normally we should not have pending statements to validators in our cluster, + /// but if we do for all validators in our cluster, then we don't participate + /// in backing. Occasional pending statements are expected if two authorities + /// can't detect each other, or after a restart, where it takes a while to discover + /// the whole network. + + pub fn warn_if_too_many_pending_statements(&self, parent_hash: Hash) { + if self.pending.iter().filter(|pending| !pending.1.is_empty()).count() >= + self.validators.len() + { + gum::warn!( + target: LOG_TARGET, + pending_statements = ?self.pending, + ?parent_hash, + "Cluster has too many pending statements, something is wrong with our connection to our group peers \n + Restart might be needed if the validator gets 0 backing rewards for more than 3-4 consecutive sessions" + ); + } + } } /// Incoming statement was accepted. diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index dc29c19a48e333324c1aaa59f543d18e1ea33413..2c9cdba4ea8eb1155f9307eb01460a777ae9a737 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -251,6 +251,13 @@ impl PerSessionState { if local_index.is_some() { self.local_validator.get_or_insert(LocalValidatorIndex::Inactive); } + + gum::info!( + target: LOG_TARGET, + index_in_gossip_topology = ?local_index, + index_in_parachain_authorities = ?self.local_validator, + "Node uses the following topology indices" + ); } /// Returns `true` if local is neither active or inactive validator node.
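Read in isolation, the predicate in `warn_if_too_many_pending_statements` above only fires when every validator slot in the cluster still has undelivered statements. A minimal self-contained sketch of that condition, assuming `pending` maps each group validator to its set of pending statements as in `ClusterTracker`; the helper name here is ours, purely illustrative:

```rust
use std::collections::{HashMap, HashSet};

// Warn only when the number of validators with a non-empty pending set
// reaches the group size, i.e. we likely cannot deliver to any group peer.
fn should_warn<K, V>(pending: &HashMap<K, HashSet<V>>, group_size: usize) -> bool {
    pending.values().filter(|set| !set.is_empty()).count() >= group_size
}
```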
@@ -768,7 +775,15 @@ pub(crate) fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { let pruned = state.implicit_view.deactivate_leaf(*leaf); for pruned_rp in pruned { // clean up per-relay-parent data based on everything removed. - state.per_relay_parent.remove(&pruned_rp); + state + .per_relay_parent + .remove(&pruned_rp) + .as_ref() + .and_then(|pruned| pruned.active_validator_state()) + .map(|active_state| { + active_state.cluster_tracker.warn_if_too_many_pending_statements(pruned_rp) + }); + // clean up requests related to this relay parent. state.request_manager.remove_by_relay_parent(*leaf); } diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index bed3d5c18ae2b1007a3ee585f00a54d1b98f7c27..bbcb268415e67141ff1c620a134582d06bd8a67f 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -315,7 +315,16 @@ impl RequestManager { request_props: impl Fn(&CandidateIdentifier) -> Option, peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option, ) -> Option> { - if response_manager.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { + // The number of parallel requests a node can answer is limited by + // `MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS`, however there is no + // need for the current node to limit itself to the same number of + // requests, because the requests are going to different nodes anyway. + // While looking at https://github.com/paritytech/polkadot-sdk/issues/3314, + // we found out that these requests take around 100ms to fulfill, so it + // makes sense to issue requests as early as we can, given that we need + // one request per candidate, around 25 right now + // on Kusama.
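+ // Illustrative arithmetic (assuming `MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS` + // is 5, which is not stated in this hunk): the doubled limit below allows 10 + // requests in flight, so ~25 candidates at ~100ms each complete in about three + // waves (~300ms) instead of the five waves (~500ms) the undoubled limit needs.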
+ if response_manager.len() >= 2 * MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { return None } @@ -1027,6 +1036,7 @@ mod tests { let peer_advertised = |_identifier: &CandidateIdentifier, _peer: &_| { Some(StatementFilter::full(group_size)) }; + let outgoing = request_manager .next_request(&mut response_manager, request_props, peer_advertised) .unwrap(); @@ -1148,6 +1158,7 @@ mod tests { { let request_props = |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let outgoing = request_manager .next_request(&mut response_manager, request_props, peer_advertised) .unwrap(); @@ -1230,6 +1241,7 @@ mod tests { { let request_props = |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let outgoing = request_manager .next_request(&mut response_manager, request_props, peer_advertised) .unwrap(); diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index b5560929b2eb25ef5d63b6fcd1527a32d4adafd5..b4541bcc346c8ce4282947c38e0d9a2591631090 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -22,9 +22,9 @@ sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compres sp-runtime = { path = "../../../substrate/primitives/runtime" } polkadot-parachain-primitives = { path = "../../parachain", default-features = false } schnorrkel = "0.11.4" -thiserror = "1.0.48" +thiserror = { workspace = true } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -serde = { version = "1.0.196", features = ["derive"] } +serde = { features = ["derive"], workspace = true, default-features = true } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.12.4", default-features = false } diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 6e3eefbcbe8c7bd6029c44e2cc43a2c30ea834f3..d295c21cce1dc9f609c8068fb806cff69612f7d6 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.7.0"; +pub const NODE_VERSION: &'static str = "1.8.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index dc6acd506c8400ce0dcec4d211dc9195304f2d05..734dcbdeb4411023cfbf540dda01a0b8a4799d95 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -85,14 +85,15 @@ is_executable = "1.0.1" gum = { package = "tracing-gum", path = "../gum" } log = { workspace = true, default-features = true } schnellru = "0.2.1" -serde = { version = "1.0.196", features = ["derive"] } -serde_json = "1.0.113" -thiserror = "1.0.48" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } kvdb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } parity-db = { version = "0.4.12", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } parking_lot = "0.12.1" +bitvec = { version = "1.0.1", optional = true } # Polkadot polkadot-core-primitives = { path = "../../core-primitives" } @@ -184,8 +185,8 @@ full-node = [ ] # Configure the native runtimes to use. 
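+# Note: `bitvec` above is optional because only the native-runtime features below +# need it; they construct the `node_features` bitflags in `chain_spec.rs` (see +# that file later in this diff).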
-westend-native = ["westend-runtime", "westend-runtime-constants"] -rococo-native = ["rococo-runtime", "rococo-runtime-constants"] +westend-native = ["bitvec", "westend-runtime", "westend-runtime-constants"] +rococo-native = ["bitvec", "rococo-runtime", "rococo-runtime-constants"] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 979550c7570643380c5a2e0f0f5de7c049c01f85..fd60cb8b6c1d443d47a85afcb7b885b4cf0593ae 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -16,8 +16,8 @@ "/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", "/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", - "/dns/kusama-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", - "/dns/kusama-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", + "/dns/kusama.bootnodes.polkadotters.com/tcp/30311/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", + "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", "/dns/boot-cr.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot-cr.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot-kusama.metaspan.io/tcp/23012/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", diff --git a/polkadot/node/service/chain-specs/paseo.json b/polkadot/node/service/chain-specs/paseo.json index c8f2b58533c62af5ee3b71ffa5d782c852edd162..8db75ff137b0dbe338859a22ad7361f413c0fbea 100644 --- a/polkadot/node/service/chain-specs/paseo.json +++ b/polkadot/node/service/chain-specs/paseo.json @@ -12,7 +12,9 @@ "/dns/boot-node.helikon.io/tcp/10020/p2p/12D3KooWBetfzZpf6tGihKrqCo5z854Ub4ZNAUUTRT6eYHNh7FYi", "/dns/boot-node.helikon.io/tcp/10022/wss/p2p/12D3KooWBetfzZpf6tGihKrqCo5z854Ub4ZNAUUTRT6eYHNh7FYi", "/dns/pso16.rotko.net/tcp/33246/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", - "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu" + "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", + "/dns/paseo.bootnodes.polkadotters.com/tcp/30538/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP", + "/dns/paseo.bootnodes.polkadotters.com/tcp/30540/wss/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 71dbb9004038d1394cebe568627e1e6163ee9049..c235cdd09d8b6eee517909052556a02427495d09 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -17,8 +17,8 @@ "/dns/boot-node.helikon.io/tcp/7072/wss/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", "/dns/polkadot.bootnode.amforc.com/tcp/30333/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", - "/dns/polkadot-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", - 
"/dns/polkadot-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", + "/dns/polkadot.bootnodes.polkadotters.com/tcp/30314/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", + "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", "/dns/boot-cr.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot-cr.gatotech.network/tcp/35100/wss/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot-polkadot.metaspan.io/tcp/13012/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index 697675871fcd7b4b11cac5a25e71c704d471cbde..775f3e72ac75578a0f0166cba96531244af760c9 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -14,8 +14,8 @@ "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", "/dns/westend.bootnode.amforc.com/tcp/30333/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", "/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", - "/dns/westend-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", - "/dns/westend-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", + "/dns/westend.bootnodes.polkadotters.com/tcp/30308/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", + "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", "/dns/boot-cr.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot-cr.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot-westend.metaspan.io/tcp/33012/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index af241d1cbc558c849bbf533dd644848b20fa4491..1c44b17b6fd24810bf896a64953a1b2f53b490da 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -24,6 +24,8 @@ use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, ValidatorId}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +#[cfg(any(feature = "rococo-native", feature = "westend-native",))] +use polkadot_primitives::vstaging::SchedulerParams; #[cfg(feature = "rococo-native")] use rococo_runtime as rococo; #[cfg(feature = "rococo-native")] @@ -120,7 +122,9 @@ pub fn wococo_config() -> Result { fn default_parachains_host_configuration( ) -> polkadot_runtime_parachains::configuration::HostConfiguration { - use polkadot_primitives::{AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE}; + use polkadot_primitives::{ + vstaging::node_features::FeatureIndex, AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, + }; polkadot_runtime_parachains::configuration::HostConfiguration { validation_upgrade_cooldown: 2u32, @@ -129,8 +133,6 @@ fn default_parachains_host_configuration( max_code_size: MAX_CODE_SIZE, max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, - group_rotation_frequency: 20, - paras_availability_period: 4, max_upward_queue_count: 8, max_upward_queue_size: 1024 * 1024, max_downward_message_size: 1024 * 1024, @@ -151,11 
+153,19 @@ fn default_parachains_host_configuration( relay_vrf_modulo_samples: 2, zeroth_delay_tranche_width: 0, minimum_validation_upgrade_delay: 5, - scheduling_lookahead: 2, async_backing_params: AsyncBackingParams { max_candidate_depth: 3, allowed_ancestry_len: 2, }, + node_features: bitvec::vec::BitVec::from_element( + 1u8 << (FeatureIndex::ElasticScalingMVP as usize), + ), + scheduler_params: SchedulerParams { + lookahead: 2, + group_rotation_frequency: 20, + paras_availability_period: 4, + ..Default::default() + }, ..Default::default() } } @@ -891,7 +901,10 @@ pub fn rococo_testnet_genesis( "sudo": { "key": Some(root_key.clone()) }, "configuration": { "config": polkadot_runtime_parachains::configuration::HostConfiguration { - max_validators_per_core: Some(1), + scheduler_params: SchedulerParams { + max_validators_per_core: Some(1), + ..default_parachains_host_configuration().scheduler_params + }, ..default_parachains_host_configuration() }, }, diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index ccc3da22400dfc38f5e94aa4f6e89969499dbee8..085ea93fdc78526f793b726bf385124ec6418e5d 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -60,7 +60,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 6dcdec07ca84bd0fe9456e2bc29cbdf6cee6a9f8..83a0afc077e7edfb4a60fb8ed4109157f020695b 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -92,6 +92,7 @@ pub use chain_spec::{GenericChainSpec, RococoChainSpec, WestendChainSpec}; pub use consensus_common::{Proposal, SelectChain}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use mmr_gadget::MmrGadget; +use polkadot_node_subsystem_types::DefaultSubsystemClient; pub use polkadot_primitives::{Block, BlockId, BlockNumber, CollatorPair, Hash, Id as ParaId}; pub use sc_client_api::{Backend, CallExecutor}; pub use sc_consensus::{BlockImport, LongestChain}; @@ -1081,12 +1082,17 @@ pub fn new_full( None }; + let runtime_client = Arc::new(DefaultSubsystemClient::new( + overseer_client.clone(), + OffchainTransactionPoolFactory::new(transaction_pool.clone()), + )); + let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { let (overseer, overseer_handle) = overseer_gen - .generate::( + .generate::>( overseer_connector, OverseerGenArgs { - runtime_client: overseer_client.clone(), + runtime_client, network_service: network.clone(), sync_service: sync_service.clone(), authority_discovery_service, @@ -1099,9 +1105,6 @@ pub fn new_full( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory: OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - ), notification_services, }, ext_overseer_args, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index b2b0786bfc24e5f702ddd82aec0676f0f9f795a8..a8167370464937ac91ae44056ea0b17b7184927a 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsParachainNode, Registry}; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use super::{Block, Error, Hash, IsParachainNode, Registry}; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_overseer::{DummySubsystem, InitializedOverseerBuilder, SubsystemError}; -use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_core::traits::SpawnNamed; use polkadot_availability_distribution::IncomingRequestReceivers; @@ -40,14 +39,10 @@ use polkadot_overseer::{ }; use parking_lot::Mutex; -use polkadot_primitives::runtime_api::ParachainHost; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_client_api::AuxStore; use sc_keystore::LocalKeystore; use sc_network::{NetworkStateInfo, NotificationService}; -use sp_api::ProvideRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_consensus_babe::BabeApi; use std::{collections::HashMap, sync::Arc}; pub use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem; @@ -80,8 +75,6 @@ pub use polkadot_statement_distribution::StatementDistributionSubsystem; /// Arguments passed for overseer construction. pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, { /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. @@ -89,7 +82,7 @@ where /// Underlying network service implementation. pub network_service: Arc>, /// Underlying syncing service implementation. - pub sync_service: Arc>, + pub sync_service: Arc, /// Underlying authority discovery service. pub authority_discovery_service: AuthorityDiscoveryService, /// Collations request receiver for network protocol v1. @@ -111,8 +104,6 @@ where pub req_protocol_names: ReqProtocolNames, /// `PeerSet` protocol names to protocols mapping. pub peerset_protocol_names: PeerSetProtocolNames, - /// The offchain transaction pool factory. - pub offchain_transaction_pool_factory: OffchainTransactionPoolFactory, /// Notification services for validation/collation protocols. 
pub notification_services: HashMap>, } @@ -160,7 +151,6 @@ pub fn validator_overseer_builder( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory, notification_services, }: OverseerGenArgs, ExtendedOverseerGenArgs { @@ -180,7 +170,7 @@ pub fn validator_overseer_builder( ) -> Result< InitializedOverseerBuilder< SpawnGlue, - Arc>, + Arc, CandidateValidationSubsystem, PvfCheckerSubsystem, CandidateBackingSubsystem, @@ -190,7 +180,7 @@ pub fn validator_overseer_builder( BitfieldSigningSubsystem, BitfieldDistributionSubsystem, ProvisionerSubsystem, - RuntimeApiSubsystem>, + RuntimeApiSubsystem, AvailabilityStoreSubsystem, NetworkBridgeRxSubsystem< Arc>, @@ -214,8 +204,7 @@ pub fn validator_overseer_builder( Error, > where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { use polkadot_node_subsystem_util::metrics::Metrics; @@ -227,11 +216,6 @@ where let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let runtime_api_client = Arc::new(DefaultSubsystemClient::new( - runtime_client.clone(), - offchain_transaction_pool_factory, - )); - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -298,7 +282,7 @@ where }) .provisioner(ProvisionerSubsystem::new(Metrics::register(registry)?)) .runtime_api(RuntimeApiSubsystem::new( - runtime_api_client.clone(), + runtime_client.clone(), Metrics::register(registry)?, spawner.clone(), )) @@ -339,7 +323,7 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .supports_parachains(runtime_api_client) + .supports_parachains(runtime_client) .metrics(metrics) .spawner(spawner); @@ -367,13 +351,12 @@ pub fn collator_overseer_builder( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory, notification_services, }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< SpawnGlue, - Arc>, + Arc, DummySubsystem, DummySubsystem, DummySubsystem, @@ -383,7 +366,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, - RuntimeApiSubsystem>, + RuntimeApiSubsystem, DummySubsystem, NetworkBridgeRxSubsystem< Arc>, @@ -407,24 +390,17 @@ pub fn collator_overseer_builder( Error, > where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, { use polkadot_node_subsystem_util::metrics::Metrics; - let metrics = ::register(registry)?; let notification_sinks = Arc::new(Mutex::new(HashMap::new())); let spawner = SpawnGlue(spawner); let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let runtime_api_client = Arc::new(DefaultSubsystemClient::new( - runtime_client.clone(), - offchain_transaction_pool_factory, - )); - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -475,7 +451,7 @@ where }) .provisioner(DummySubsystem) .runtime_api(RuntimeApiSubsystem::new( - runtime_api_client.clone(), + runtime_client.clone(), 
Metrics::register(registry)?, spawner.clone(), )) @@ -490,8 +466,8 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .supports_parachains(runtime_api_client) - .metrics(metrics) + .supports_parachains(runtime_client) + .metrics(Metrics::register(registry)?) .spawner(spawner); let builder = if let Some(capacity) = overseer_message_channel_capacity_override { @@ -510,13 +486,9 @@ pub trait OverseerGen { connector: OverseerConnector, args: OverseerGenArgs, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin; // It would be nice to make `create_subsystems` part of this trait, @@ -533,13 +505,9 @@ impl OverseerGen for ValidatorOverseerGen { connector: OverseerConnector, args: OverseerGenArgs, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let ext_args = ext_args.ok_or(Error::Overseer(SubsystemError::Context( @@ -561,13 +529,9 @@ impl OverseerGen for CollatorOverseerGen { connector: OverseerConnector, args: OverseerGenArgs, _ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { collator_overseer_builder(args)? diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index d22eebb5c8d4edebdd2174f3cb1ba144fea0b130..2eceb391b15c278825b243fdb8dc21e8ef540390 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -93,7 +93,7 @@ pub(crate) fn try_upgrade_db( } /// Try upgrading parachain's database to the next version. -/// If successfull, it returns the current version. +/// If successful, it returns the current version. pub(crate) fn try_upgrade_db_to_next_version( db_path: &Path, db_kind: DatabaseKind, diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 203cfe05db1b54e7190246c1f388f67ff512a521..7c60ff48269c41c291b1649fe58c75056cc50f9a 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -8,9 +8,13 @@ license.workspace = true readme = "README.md" publish = false +[lib] +name = "polkadot_subsystem_bench" +path = "src/lib/lib.rs" + [[bin]] name = "subsystem-bench" -path = "src/subsystem-bench.rs" +path = "src/cli/subsystem-bench.rs" # Prevent rustdoc error. Already documented from top-level Cargo.toml. 
doc = false @@ -65,8 +69,8 @@ itertools = "0.11.0" polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } prometheus_endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" } prometheus = { version = "0.13.0", default-features = false } -serde = "1.0.196" -serde_yaml = "0.9" +serde = { workspace = true, default-features = true } +serde_yaml = { workspace = true } polkadot-node-core-approval-voting = { path = "../core/approval-voting" } polkadot-approval-distribution = { path = "../network/approval-distribution" } @@ -74,8 +78,9 @@ sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -schnorrkel = { version = "0.9.1", default-features = false } -rand_core = "0.6.2" # should match schnorrkel +schnorrkel = { version = "0.11.4", default-features = false } +# rand_core should match schnorrkel +rand_core = "0.6.2" rand_chacha = { version = "0.3.1" } paste = "1.0.14" orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } diff --git a/polkadot/node/subsystem-bench/README.md b/polkadot/node/subsystem-bench/README.md index e090a0392cb733ffc2a4b21edc73da44e5aed708..94a449ed0bb8949816ac7aa5671883b6cb17100c 100644 --- a/polkadot/node/subsystem-bench/README.md +++ b/polkadot/node/subsystem-bench/README.md @@ -1,6 +1,6 @@ # Subsystem benchmark client -Run parachain consensus stress and performance tests on your development machine. +Run parachain consensus stress and performance tests on your development machine or in CI. ## Motivation @@ -32,7 +32,8 @@ a local Grafana/Prometheus stack is needed. ### Run Prometheus, Pyroscope and Graphana in Docker -If docker is not usable, then follow the next sections to manually install Prometheus, Pyroscope and Graphana on your machine. +If docker is not usable, then follow the next sections to manually install Prometheus, Pyroscope and Grafana +on your machine. ```bash cd polkadot/node/subsystem-bench/docker docker compose up ``` @@ -44,7 +45,7 @@ docker compose up Please follow the [official installation guide](https://prometheus.io/docs/prometheus/latest/installation/) for your platform/OS. -After succesfully installing and starting up Prometheus, we need to alter it's configuration such that it +After successfully installing and starting up Prometheus, we need to alter its configuration such that it will scrape the benchmark prometheus endpoint `127.0.0.1:9999`. Please check the prometheus official documentation regarding the location of `prometheus.yml`. On MacOS for example the full path `/opt/homebrew/etc/prometheus.yml` @@ -95,39 +96,16 @@ If you are running the servers in Docker, use the following URLs: Follow [this guide](https://grafana.com/docs/grafana/latest/dashboards/manage-dashboards/#export-and-import-dashboards) to import the dashboards from the repository `grafana` folder. -## How to run a test - -To run a test, you need to first choose a test objective. Currently, we support the following: - -``` -target/testnet/subsystem-bench --help -The almighty Subsystem Benchmark Tool™️ - -Usage: subsystem-bench [OPTIONS] - -Commands: - data-availability-read Benchmark availability recovery strategies +### Standard test options ``` +$ subsystem-bench --help Usage: subsystem-bench [OPTIONS] Arguments: Path to the test sequence configuration file
It is tipically - used to run a suite of tests defined in a `yaml` file like in this [example](examples/availability_read.yaml). +Arguments: Path to the test sequence configuration file -### Standard test options - -``` - --network The type of network to be emulated [default: ideal] [possible values: ideal, healthy, - degraded] - --n-cores Number of cores to fetch availability for [default: 100] - --n-validators Number of validators to fetch chunks from [default: 500] - --min-pov-size The minimum pov size in KiB [default: 5120] - --max-pov-size The maximum pov size bytes [default: 5120] - -n, --num-blocks The number of blocks the test is going to run [default: 1] - -p, --peer-bandwidth The bandwidth of emulated remote peers in KiB - -b, --bandwidth The bandwidth of our node in KiB - --connectivity Emulated peer connection ratio [0-100] - --peer-mean-latency Mean remote peer latency in milliseconds [0-5000] - --peer-latency-std-dev Remote peer latency standard deviation +Options: --profile Enable CPU Profiling with Pyroscope --pyroscope-url Pyroscope Server URL [default: http://localhost:4040] --pyroscope-sample-rate Pyroscope Sample Rate [default: 113] -h, --help Print help ``` -These apply to all test objectives, except `test-sequence` which relies on the values being specified in a file. - -### Test objectives - -Each test objective can have it's specific configuration options, in contrast with the standard test options. +## How to run a test -For `data-availability-read` the recovery strategy to be used is configurable. +To run a test, you need to pass the path to a test objective: ``` -target/testnet/subsystem-bench data-availability-read --help -Benchmark availability recovery strategies - -Usage: subsystem-bench data-availability-read [OPTIONS] - -Options: - -f, --fetch-from-backers Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU - as we don't need to re-construct from chunks. Tipically this is only faster if nodes - have enough bandwidth - -h, --help Print help +target/testnet/subsystem-bench polkadot/node/subsystem-bench/examples/availability_read.yaml ``` +Note: test objectives may be wrapped up into a test sequence. +It is typically used to run a suite of tests like in this [example](examples/availability_read.yaml). + ### Understanding the test configuration A single test configuration `TestConfiguration` struct applies to a single run of a certain test objective. @@ -175,36 +143,65 @@ the test is started. ### Example run -Let's run an availabilty read test which will recover availability for 10 cores with max PoV size on a 500 +Let's run an availability read test which will recover availability for 200 cores with max PoV size on a 1000 node validator network. + + ``` - target/testnet/subsystem-bench --n-cores 10 data-availability-read -[2023-11-28T09:01:59Z INFO subsystem_bench::core::display] n_validators = 500, n_cores = 10, pov_size = 5120 - 5120, - latency = None -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880 -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Created test environment. -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Pre-generating 10 candidates. -[2023-11-28T09:02:01Z INFO subsystem-bench::core] Initializing network emulation for 500 peers.
-[2023-11-28T09:02:01Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999 -[2023-11-28T09:02:01Z INFO subsystem-bench::availability] Current block 1/1 -[2023-11-28T09:02:01Z INFO subsystem_bench::availability] 10 recoveries pending -[2023-11-28T09:02:04Z INFO subsystem_bench::availability] Block time 3231ms -[2023-11-28T09:02:04Z INFO subsystem-bench::availability] Sleeping till end of block (2768ms) -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] All blocks processed in 6001ms -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Throughput: 51200 KiB/block -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Block time: 6001 ms -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] - - Total received from network: 66 MiB - Total sent to network: 58 KiB - Total subsystem CPU usage 4.16s - CPU usage per block 4.16s - Total test environment CPU usage 0.00s - CPU usage per block 0.00s +target/testnet/subsystem-bench polkadot/node/subsystem-bench/examples/availability_write.yaml +[2024-02-19T14:10:32.981Z INFO subsystem_bench] Sequence contains 1 step(s) +[2024-02-19T14:10:32.981Z INFO subsystem-bench::cli] Step 1/1 +[2024-02-19T14:10:32.981Z INFO subsystem-bench::cli] [objective = DataAvailabilityWrite] n_validators = 1000, n_cores = 200, pov_size = 5120 - 5120, connectivity = 75, latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }) +[2024-02-19T14:10:32.982Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880 +[2024-02-19T14:10:33.106Z INFO subsystem-bench::availability] Created test environment. +[2024-02-19T14:10:33.106Z INFO subsystem-bench::availability] Pre-generating 600 candidates. +[2024-02-19T14:10:34.096Z INFO subsystem-bench::network] Initializing emulation for a 1000 peer network. +[2024-02-19T14:10:34.096Z INFO subsystem-bench::network] connectivity 75%, latency Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }) +[2024-02-19T14:10:34.098Z INFO subsystem-bench::network] Network created, connected validator count 749 +[2024-02-19T14:10:34.099Z INFO subsystem-bench::availability] Seeding availability store with candidates ... +[2024-02-19T14:10:34.100Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999 +[2024-02-19T14:10:34.387Z INFO subsystem-bench::availability] Done +[2024-02-19T14:10:34.387Z INFO subsystem-bench::availability] Current block #1 +[2024-02-19T14:10:34.389Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... +[2024-02-19T14:10:34.625Z INFO subsystem-bench::availability] All chunks received in 237ms +[2024-02-19T14:10:34.626Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] All work for block completed in 1322ms +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] Current block #2 +[2024-02-19T14:10:35.712Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... 
+[2024-02-19T14:10:35.947Z INFO subsystem-bench::availability] All chunks received in 236ms +[2024-02-19T14:10:35.947Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:37.038Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:37.038Z INFO subsystem-bench::availability] All work for block completed in 1328ms +[2024-02-19T14:10:37.039Z INFO subsystem-bench::availability] Current block #3 +[2024-02-19T14:10:37.040Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... +[2024-02-19T14:10:37.276Z INFO subsystem-bench::availability] All chunks received in 237ms +[2024-02-19T14:10:37.276Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All work for block completed in 1323ms +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All blocks processed in 3974ms +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] Avg block time: 1324 ms +[2024-02-19T14:10:38.362Z INFO parachain::availability-store] received `Conclude` signal, exiting +[2024-02-19T14:10:38.362Z INFO parachain::bitfield-distribution] Conclude +[2024-02-19T14:10:38.362Z INFO subsystem-bench::network] Downlink channel closed, network interface task exiting + +polkadot/node/subsystem-bench/examples/availability_write.yaml #1 DataAvailabilityWrite + +Network usage, KiB total per block +Received from peers 12922.000 4307.333 +Sent to peers 47705.000 15901.667 + +CPU usage, seconds total per block +availability-distribution 0.045 0.015 +bitfield-distribution 0.104 0.035 +availability-store 0.304 0.101 +Test environment 3.213 1.071 ``` -`Block time` in the context of `data-availability-read` has a different meaning. It measures the amount of time it + + +`Block time` in the current context has a different meaning. It measures the amount of time it took the subsystem to finish processing all of the messages sent in the context of the current test block. ### Test logs @@ -214,7 +211,7 @@ You can select log target, subtarget and verbosity just like with Polkadot node ### View test metrics -Assuming the Grafana/Prometheus stack installation steps completed succesfully, you should be able to +Assuming the Grafana/Prometheus stack installation steps completed successfully, you should be able to view the test progress in real time by accessing [this link](http://localhost:3000/goto/SM5B8pNSR?orgId=1). Now run @@ -233,8 +230,9 @@ Since the execution will be very slow, it's recommended not to run it together w benchmark results into account. A report is saved in a file `cachegrind_report.txt`. Example run results: + ``` -$ target/testnet/subsystem-bench --n-cores 10 --cache-misses data-availability-read +$ target/testnet/subsystem-bench --cache-misses cache-misses-data-availability-read.yaml $ cat cachegrind_report.txt I refs: 64,622,081,485 I1 misses: 3,018,168 @@ -275,7 +273,7 @@ happy and negative scenarios (low bandwidth, network errors and low connectivity To faster write a new test objective you need to use some higher level wrappers and logic: `TestEnvironment`, `TestConfiguration`, `TestAuthorities`, `NetworkEmulator`. To create the `TestEnvironment` you will -need to also build an `Overseer`, but that should be easy using the mockups for subsystems in`core::mock`. 
+need to also build an `Overseer`, but that should be easy using the mockups for subsystems in `mock`. ### Mocking diff --git a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml index 758c7fbbf1121a249ff3094bc74967e3ad2420da..146da57d44c4aaf973e13c886a357028cdbe3559 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml @@ -1,14 +1,14 @@ TestConfiguration: # Test 1 - objective: !ApprovalVoting - last_considered_tranche: 89 coalesce_mean: 3.0 coalesce_std_dev: 1.0 + enable_assignments_v2: true + last_considered_tranche: 89 stop_when_approved: true coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" - enable_assignments_v2: true num_no_shows_per_candidate: 10 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml index 9eeeefc53a4277aae51e44ee5940a2eb65111cd3..6b17e62c20aa3f69153fb596d1a303a2e0320ddd 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml @@ -7,11 +7,10 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: false coalesce_tranche_diff: 12 - workdir_prefix: "/tmp" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp" n_validators: 500 n_cores: 100 - n_included_candidates: 100 min_pov_size: 1120 max_pov_size: 5120 peer_bandwidth: 524288000000 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml index 370bb31a5c4c102174c29382ab6de70ca336278e..e946c28e8ef5d4e38736ffc21e56d8b1c6cd0ddc 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml @@ -7,8 +7,8 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: true coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml index 30b9ac8dc50fec25bc4952ef3706f9878aa2bbd7..8f4b050e72f27dd4b5bb0c52bd49162cd0bb83ec 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml @@ -7,8 +7,8 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: false coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/src/availability/cli.rs b/polkadot/node/subsystem-bench/src/availability/cli.rs deleted file mode 100644 index 65df8c1552aa8266497eb2738cc562f656050b68..0000000000000000000000000000000000000000 --- a/polkadot/node/subsystem-bench/src/availability/cli.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use serde::{Deserialize, Serialize}; - -#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq)] -#[value(rename_all = "kebab-case")] -#[non_exhaustive] -pub enum NetworkEmulation { - Ideal, - Healthy, - Degraded, -} - -#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] -#[clap(rename_all = "kebab-case")] -#[allow(missing_docs)] -pub struct DataAvailabilityReadOptions { - #[clap(short, long, default_value_t = false)] - /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as - /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have - /// enough bandwidth. - pub fetch_from_backers: bool, -} diff --git a/polkadot/node/subsystem-bench/src/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs similarity index 62% rename from polkadot/node/subsystem-bench/src/subsystem-bench.rs rename to polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index 0803f175474e69a76640b12497acaaafa4cd5213..deb351360d74ede1ea78494a34f7c459dc544cdd 100644 --- a/polkadot/node/subsystem-bench/src/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -14,54 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A tool for running subsystem benchmark tests designed for development and -//! CI regression testing. - -use approval::{bench_approvals, ApprovalsOptions}; -use availability::{ - cli::{DataAvailabilityReadOptions, NetworkEmulation}, - prepare_test, TestState, -}; +//! A tool for running subsystem benchmark tests +//! designed for development and CI regression testing. + use clap::Parser; -use clap_num::number_range; use color_eyre::eyre; use colored::Colorize; -use core::{ - configuration::TestConfiguration, - display::display_configuration, - environment::{TestEnvironment, GENESIS_HASH}, -}; +use polkadot_subsystem_bench::{approval, availability, configuration}; use pyroscope::PyroscopeAgent; use pyroscope_pprofrs::{pprof_backend, PprofConfig}; use serde::{Deserialize, Serialize}; use std::path::Path; -mod approval; -mod availability; -mod core; mod valgrind; -const LOG_TARGET: &str = "subsystem-bench"; - -fn le_100(s: &str) -> Result { - number_range(s, 0, 100) -} - -fn le_5000(s: &str) -> Result { - number_range(s, 0, 5000) -} +const LOG_TARGET: &str = "subsystem-bench::cli"; /// Supported test objectives #[derive(Debug, Clone, Parser, Serialize, Deserialize)] #[command(rename_all = "kebab-case")] pub enum TestObjective { /// Benchmark availability recovery strategies. - DataAvailabilityRead(DataAvailabilityReadOptions), + DataAvailabilityRead(availability::DataAvailabilityReadOptions), /// Benchmark availability and bitfield distribution. DataAvailabilityWrite, /// Benchmark the approval-voting and approval-distribution subsystems. 
- ApprovalVoting(ApprovalsOptions), - Unimplemented, + ApprovalVoting(approval::ApprovalsOptions), } impl std::fmt::Display for TestObjective { @@ -73,39 +51,37 @@ impl std::fmt::Display for TestObjective { Self::DataAvailabilityRead(_) => "DataAvailabilityRead", Self::DataAvailabilityWrite => "DataAvailabilityWrite", Self::ApprovalVoting(_) => "ApprovalVoting", - Self::Unimplemented => "Unimplemented", } ) } } -#[derive(Debug, Parser)] -#[allow(missing_docs)] -struct BenchCli { - #[arg(long, value_enum, ignore_case = true, default_value_t = NetworkEmulation::Ideal)] - /// The type of network to be emulated - pub network: NetworkEmulation, - - #[clap(short, long)] - /// The bandwidth of emulated remote peers in KiB - pub peer_bandwidth: Option, - - #[clap(short, long)] - /// The bandwidth of our node in KiB - pub bandwidth: Option, - - #[clap(long, value_parser=le_100)] - /// Emulated peer connection ratio [0-100]. - pub connectivity: Option, +/// The test input parameters +#[derive(Clone, Debug, Serialize, Deserialize)] +struct CliTestConfiguration { + /// Test Objective + pub objective: TestObjective, + /// Test Configuration + #[serde(flatten)] + pub test_config: configuration::TestConfiguration, +} - #[clap(long, value_parser=le_5000)] - /// Mean remote peer latency in milliseconds [0-5000]. - pub peer_mean_latency: Option, +#[derive(Serialize, Deserialize)] +pub struct TestSequence { + #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))] + test_configurations: Vec, +} - #[clap(long, value_parser=le_5000)] - /// Remote peer latency standard deviation - pub peer_latency_std_dev: Option, +impl TestSequence { + fn new_from_file(path: &Path) -> std::io::Result { + let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8"); + Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YA")) + } +} +#[derive(Debug, Parser)] +#[allow(missing_docs)] +struct BenchCli { #[clap(long, default_value_t = false)] /// Enable CPU Profiling with Pyroscope pub profile: bool, @@ -122,10 +98,6 @@ struct BenchCli { /// Enable Cache Misses Profiling with Valgrind. 
Linux only, Valgrind must be in the PATH pub cache_misses: bool, - #[clap(long, default_value_t = false)] - /// Shows the output in YAML format - pub yaml_output: bool, - #[arg(required = true)] /// Path to the test sequence configuration file pub path: String, @@ -148,49 +120,60 @@ impl BenchCli { None }; - let test_sequence = core::configuration::TestSequence::new_from_file(Path::new(&self.path)) + let test_sequence = TestSequence::new_from_file(Path::new(&self.path)) .expect("File exists") - .into_vec(); + .test_configurations; let num_steps = test_sequence.len(); gum::info!("{}", format!("Sequence contains {} step(s)", num_steps).bright_purple()); - for (index, test_config) in test_sequence.into_iter().enumerate() { - let benchmark_name = format!("{} #{} {}", &self.path, index + 1, test_config.objective); - gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),); - display_configuration(&test_config); - let usage = match test_config.objective { - TestObjective::DataAvailabilityRead(ref _opts) => { - let mut state = TestState::new(&test_config); - let (mut env, _protocol_config) = prepare_test(test_config, &mut state); + for (index, CliTestConfiguration { objective, mut test_config }) in + test_sequence.into_iter().enumerate() + { + let benchmark_name = format!("{} #{} {}", &self.path, index + 1, objective); + gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),); + gum::info!(target: LOG_TARGET, "[{}] {}", format!("objective = {:?}", objective).green(), test_config); + test_config.generate_pov_sizes(); + + let usage = match objective { + TestObjective::DataAvailabilityRead(opts) => { + let mut state = availability::TestState::new(&test_config); + let (mut env, _protocol_config) = availability::prepare_test( + test_config, + &mut state, + availability::TestDataAvailability::Read(opts), + true, + ); env.runtime().block_on(availability::benchmark_availability_read( &benchmark_name, &mut env, state, )) }, - TestObjective::ApprovalVoting(ref options) => { - let (mut env, state) = - approval::prepare_test(test_config.clone(), options.clone()); - env.runtime().block_on(bench_approvals(&benchmark_name, &mut env, state)) - }, TestObjective::DataAvailabilityWrite => { - let mut state = TestState::new(&test_config); - let (mut env, _protocol_config) = prepare_test(test_config, &mut state); + let mut state = availability::TestState::new(&test_config); + let (mut env, _protocol_config) = availability::prepare_test( + test_config, + &mut state, + availability::TestDataAvailability::Write, + true, + ); env.runtime().block_on(availability::benchmark_availability_write( &benchmark_name, &mut env, state, )) }, - TestObjective::Unimplemented => todo!(), - }; - - let output = if self.yaml_output { - serde_yaml::to_string(&vec![usage])? 
- } else { - usage.to_string() + TestObjective::ApprovalVoting(ref options) => { + let (mut env, state) = + approval::prepare_test(test_config.clone(), options.clone(), true); + env.runtime().block_on(approval::bench_approvals( + &benchmark_name, + &mut env, + state, + )) + }, }; - println!("{}", output); + println!("{}", usage); } if let Some(agent_running) = agent_running { diff --git a/polkadot/node/subsystem-bench/src/valgrind.rs b/polkadot/node/subsystem-bench/src/cli/valgrind.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/valgrind.rs rename to polkadot/node/subsystem-bench/src/cli/valgrind.rs diff --git a/polkadot/node/subsystem-bench/src/approval/helpers.rs b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/approval/helpers.rs rename to polkadot/node/subsystem-bench/src/lib/approval/helpers.rs index 623d91848f53f26e69ad7595c52a0e61466af6d1..af5ff5aa1facc11f2d0caa8ec77276976a0554a5 100644 --- a/polkadot/node/subsystem-bench/src/approval/helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::core::configuration::TestAuthorities; +use crate::configuration::TestAuthorities; use itertools::Itertools; use polkadot_node_core_approval_voting::time::{Clock, SystemClock, Tick}; use polkadot_node_network_protocol::{ diff --git a/polkadot/node/subsystem-bench/src/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/approval/message_generator.rs rename to polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index a71034013247678aeb9aced2619f8d1d6d29ebe9..c1b31a509f6dd21c3f23bb44afec8859e51fa76a 100644 --- a/polkadot/node/subsystem-bench/src/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -18,15 +18,12 @@ use crate::{ approval::{ helpers::{generate_babe_epoch, generate_topology}, test_message::{MessagesBundle, TestMessageInfo}, - ApprovalTestState, BlockTestData, GeneratedState, BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, - SLOT_DURATION_MILLIS, + ApprovalTestState, ApprovalsOptions, BlockTestData, GeneratedState, + BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, SLOT_DURATION_MILLIS, }, - core::{ - configuration::{TestAuthorities, TestConfiguration}, - mock::runtime_api::session_info_for_peers, - NODE_UNDER_TEST, - }, - ApprovalsOptions, TestObjective, + configuration::{TestAuthorities, TestConfiguration}, + mock::runtime_api::session_info_for_peers, + NODE_UNDER_TEST, }; use futures::SinkExt; use itertools::Itertools; @@ -132,11 +129,7 @@ impl PeerMessagesGenerator { options: &ApprovalsOptions, ) -> String { let mut fingerprint = options.fingerprint(); - let mut exclude_objective = configuration.clone(); - // The objective contains the full content of `ApprovalOptions`, we don't want to put all of - // that in fingerprint, so execlute it because we add it manually see above. 
- exclude_objective.objective = TestObjective::Unimplemented; - let configuration_bytes = bincode::serialize(&exclude_objective).unwrap(); + let configuration_bytes = bincode::serialize(&configuration).unwrap(); fingerprint.extend(configuration_bytes); let mut sha1 = sha1::Sha1::new(); sha1.update(fingerprint); diff --git a/polkadot/node/subsystem-bench/src/approval/mock_chain_selection.rs b/polkadot/node/subsystem-bench/src/lib/approval/mock_chain_selection.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/approval/mock_chain_selection.rs rename to polkadot/node/subsystem-bench/src/lib/approval/mock_chain_selection.rs diff --git a/polkadot/node/subsystem-bench/src/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/approval/mod.rs rename to polkadot/node/subsystem-bench/src/lib/approval/mod.rs index f07912de1887580a58770b9bf9e76624c60bdcc4..450faf06123f61db9ee91d1d935721d3c128e701 100644 --- a/polkadot/node/subsystem-bench/src/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -24,25 +24,21 @@ use crate::{ mock_chain_selection::MockChainSelection, test_message::{MessagesBundle, TestMessageInfo}, }, - core::{ - configuration::TestAuthorities, - environment::{ - BenchmarkUsage, TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT, - }, - mock::{ - chain_api::{ChainApiState, MockChainApi}, - dummy_builder, - network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::MockRuntimeApi, - AlwaysSupportsParachains, TestSyncOracle, - }, - network::{ - new_network, HandleNetworkMessage, NetworkEmulatorHandle, NetworkInterface, - NetworkInterfaceReceiver, - }, - NODE_UNDER_TEST, + configuration::{TestAuthorities, TestConfiguration}, + dummy_builder, + environment::{TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT}, + mock::{ + chain_api::{ChainApiState, MockChainApi}, + network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, + runtime_api::MockRuntimeApi, + AlwaysSupportsParachains, TestSyncOracle, }, - TestConfiguration, + network::{ + new_network, HandleNetworkMessage, NetworkEmulatorHandle, NetworkInterface, + NetworkInterfaceReceiver, + }, + usage::BenchmarkUsage, + NODE_UNDER_TEST, }; use colored::Colorize; use futures::channel::oneshot; @@ -472,11 +468,9 @@ impl ApprovalTestState { impl HandleNetworkMessage for ApprovalTestState { fn handle( &self, - _message: crate::core::network::NetworkMessage, - _node_sender: &mut futures::channel::mpsc::UnboundedSender< - crate::core::network::NetworkMessage, - >, - ) -> Option { + _message: crate::network::NetworkMessage, + _node_sender: &mut futures::channel::mpsc::UnboundedSender, + ) -> Option { self.total_sent_messages_from_node .as_ref() .fetch_add(1, std::sync::atomic::Ordering::SeqCst); @@ -841,8 +835,14 @@ fn build_overseer( pub fn prepare_test( config: TestConfiguration, options: ApprovalsOptions, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, ApprovalTestState) { - prepare_test_inner(config, TestEnvironmentDependencies::default(), options) + prepare_test_inner( + config, + TestEnvironmentDependencies::default(), + options, + with_prometheus_endpoint, + ) } /// Build the test environment for an Approval benchmark. 
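Worth spelling out why the `PeerMessagesGenerator` cache-key code above got simpler: with `objective` no longer a field of `TestConfiguration`, the whole configuration can be serialized into the fingerprint without masking anything first. A standalone sketch of the resulting scheme (the helper name and generic bound are illustrative, not the crate's API):

```rust
// Minimal sketch: extend the options fingerprint with the bincode-serialized
// configuration, then hash the whole buffer with SHA-1 into a hex cache key.
use sha1::{Digest, Sha1};

fn cache_key<C: serde::Serialize>(options_fingerprint: &[u8], configuration: &C) -> String {
    let mut fingerprint = options_fingerprint.to_vec();
    // Nothing needs to be excluded from the configuration any more.
    fingerprint.extend(bincode::serialize(configuration).expect("config is serializable"));

    let mut sha1 = Sha1::new();
    sha1.update(&fingerprint);
    sha1.finalize().iter().map(|byte| format!("{byte:02x}")).collect()
}
```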
@@ -850,6 +850,7 @@ fn prepare_test_inner( config: TestConfiguration, dependencies: TestEnvironmentDependencies, options: ApprovalsOptions, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, ApprovalTestState) { gum::info!("Prepare test state"); let state = ApprovalTestState::new(&config, options, &dependencies); @@ -878,6 +879,7 @@ fn prepare_test_inner( overseer, overseer_handle, state.test_authorities.clone(), + with_prometheus_endpoint, ), state, ) diff --git a/polkadot/node/subsystem-bench/src/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs similarity index 98% rename from polkadot/node/subsystem-bench/src/approval/test_message.rs rename to polkadot/node/subsystem-bench/src/lib/approval/test_message.rs index 8aaabc3426c803563bdb2ed8455eed8a0061484e..63e383509be9ff6da6e1d5dd61baebc99c574bc4 100644 --- a/polkadot/node/subsystem-bench/src/approval/test_message.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs @@ -15,9 +15,8 @@ // along with Polkadot. If not, see . use crate::{ - approval::{BlockTestData, CandidateTestData}, - core::configuration::TestAuthorities, - ApprovalsOptions, + approval::{ApprovalsOptions, BlockTestData, CandidateTestData}, + configuration::TestAuthorities, }; use itertools::Itertools; use parity_scale_codec::{Decode, Encode}; diff --git a/polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs similarity index 94% rename from polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs rename to polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs index 261dbd0376c732fb4688e6251f32e21d3ec8d4d4..3300def2235ee0ea90f466316f9bc5a7a965869c 100644 --- a/polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::core::{environment::TestEnvironmentDependencies, mock::TestSyncOracle}; +use crate::{environment::TestEnvironmentDependencies, mock::TestSyncOracle}; use polkadot_node_core_av_store::{AvailabilityStoreSubsystem, Config}; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_subsystem_util::database::Database; diff --git a/polkadot/node/subsystem-bench/src/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs similarity index 94% rename from polkadot/node/subsystem-bench/src/availability/mod.rs rename to polkadot/node/subsystem-bench/src/lib/availability/mod.rs index ad9a17ff8f47a1a3dc7e8e8e990cb0b1f7f262f6..dc4e1e4031025ca3f868265ac2e8c902f6fc8c23 100644 --- a/polkadot/node/subsystem-bench/src/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -15,22 +15,18 @@ // along with Polkadot. If not, see . 
use crate::{ - core::{ - configuration::TestConfiguration, - environment::{BenchmarkUsage, TestEnvironmentDependencies}, - mock::{ - av_store, - av_store::MockAvailabilityStore, - chain_api::{ChainApiState, MockChainApi}, - dummy_builder, - network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api, - runtime_api::MockRuntimeApi, - AlwaysSupportsParachains, - }, - network::new_network, + configuration::TestConfiguration, + dummy_builder, + environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, + mock::{ + av_store::{self, MockAvailabilityStore}, + chain_api::{ChainApiState, MockChainApi}, + network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, + runtime_api::{self, MockRuntimeApi}, + AlwaysSupportsParachains, }, - TestEnvironment, TestObjective, GENESIS_HASH, + network::new_network, + usage::BenchmarkUsage, }; use av_store::NetworkAvailabilityState; use av_store_helpers::new_av_store; @@ -73,14 +69,30 @@ use sc_network::{ PeerId, }; use sc_service::SpawnTaskHandle; +use serde::{Deserialize, Serialize}; use sp_core::H256; use std::{collections::HashMap, iter::Cycle, ops::Sub, sync::Arc, time::Instant}; mod av_store_helpers; -pub(crate) mod cli; const LOG_TARGET: &str = "subsystem-bench::availability"; +#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct DataAvailabilityReadOptions { + #[clap(short, long, default_value_t = false)] + /// Turbo boost DA Read by fetching the full availability data from backers first. Saves CPU as + /// we don't need to re-construct from chunks. Typically this is only faster if nodes have + /// enough bandwidth. + pub fetch_from_backers: bool, +} + +pub enum TestDataAvailability { + Read(DataAvailabilityReadOptions), + Write, +} + fn build_overseer_for_availability_read( spawn_task_handle: SpawnTaskHandle, runtime_api: MockRuntimeApi, @@ -141,14 +153,24 @@ fn build_overseer_for_availability_write( pub fn prepare_test( config: TestConfiguration, state: &mut TestState, + mode: TestDataAvailability, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { - prepare_test_inner(config, state, TestEnvironmentDependencies::default()) + prepare_test_inner( + config, + state, + mode, + TestEnvironmentDependencies::default(), + with_prometheus_endpoint, + ) } fn prepare_test_inner( config: TestConfiguration, state: &mut TestState, + mode: TestDataAvailability, dependencies: TestEnvironmentDependencies, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { // Generate test authorities.
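With `TestDataAvailability` in place, callers choose the mode explicitly instead of `prepare_test` inspecting a `TestObjective` stored inside the configuration. A rough usage sketch (the crate name and module paths are assumed from the new `src/lib` layout, not verbatim from the repository):

```rust
// Hypothetical driver for a read benchmark under the new API: the mode and
// the Prometheus-endpoint flag are explicit arguments now.
use polkadot_subsystem_bench::{
    availability::{self, DataAvailabilityReadOptions, TestDataAvailability, TestState},
    configuration::TestConfiguration,
};

fn run_read_bench() {
    let mut config = TestConfiguration::default();
    config.num_blocks = 3;
    config.generate_pov_sizes(); // No longer done implicitly when loading a sequence.

    let mut state = TestState::new(&config);
    let options = DataAvailabilityReadOptions { fetch_from_backers: false };
    // `true` spawns the Prometheus endpoint, as the CLI does; CI tests can pass `false`.
    let (mut env, _cfgs) =
        availability::prepare_test(config, &mut state, TestDataAvailability::Read(options), true);
    let _usage = env
        .runtime()
        .block_on(availability::benchmark_availability_read("da-read", &mut env, state));
}
```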
let test_authorities = config.generate_authorities(); @@ -216,8 +238,8 @@ fn prepare_test_inner( let network_bridge_rx = network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg.clone())); - let (overseer, overseer_handle) = match &state.config().objective { - TestObjective::DataAvailabilityRead(options) => { + let (overseer, overseer_handle) = match &mode { + TestDataAvailability::Read(options) => { let use_fast_path = options.fetch_from_backers; let subsystem = if use_fast_path { @@ -247,7 +269,7 @@ fn prepare_test_inner( &dependencies, ) }, - TestObjective::DataAvailabilityWrite => { + TestDataAvailability::Write => { let availability_distribution = AvailabilityDistributionSubsystem::new( test_authorities.keyring.keystore(), IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, @@ -284,9 +306,6 @@ fn prepare_test_inner( &dependencies, ) }, - _ => { - unimplemented!("Invalid test objective") - }, }; ( @@ -297,6 +316,7 @@ fn prepare_test_inner( overseer, overseer_handle, test_authorities, + with_prometheus_endpoint, ), req_cfgs, ) @@ -326,10 +346,6 @@ pub struct TestState { } impl TestState { - fn config(&self) -> &TestConfiguration { - &self.config - } - pub fn next_candidate(&mut self) -> Option { let candidate = self.candidates.next(); let candidate_hash = candidate.as_ref().unwrap().hash(); @@ -582,7 +598,7 @@ pub async fn benchmark_availability_write( gum::info!(target: LOG_TARGET, "Waiting for all emulated peers to receive their chunk from us ..."); for receiver in receivers.into_iter() { - let response = receiver.await.expect("Chunk is always served succesfully"); + let response = receiver.await.expect("Chunk is always served successfully"); // TODO: check if chunk is the one the peer expects to receive. assert!(response.result.is_ok()); } diff --git a/polkadot/node/subsystem-bench/src/core/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs similarity index 78% rename from polkadot/node/subsystem-bench/src/core/configuration.rs rename to polkadot/node/subsystem-bench/src/lib/configuration.rs index 00be2a86b173a61465d7610d75df1abbe09363f0..17c81c4fd355bd15c5e2a6c1005e90313ca80615 100644 --- a/polkadot/node/subsystem-bench/src/core/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -16,7 +16,7 @@ //! Test configuration definition and helpers. -use crate::{core::keyring::Keyring, TestObjective}; +use crate::keyring::Keyring; use itertools::Itertools; use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId}; use rand::thread_rng; @@ -24,17 +24,7 @@ use rand_distr::{Distribution, Normal, Uniform}; use sc_network::PeerId; use serde::{Deserialize, Serialize}; use sp_consensus_babe::AuthorityId; -use std::{collections::HashMap, path::Path}; - -pub fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize { - random_uniform_sample(min_pov_size, max_pov_size) -} - -fn random_uniform_sample + From>(min_value: T, max_value: T) -> T { - Uniform::from(min_value.into()..=max_value.into()) - .sample(&mut thread_rng()) - .into() -} +use std::collections::HashMap; /// Peer networking latency configuration. #[derive(Clone, Debug, Default, Serialize, Deserialize)] @@ -45,19 +35,34 @@ pub struct PeerLatency { pub std_dev: f64, } +// Based on Kusama `max_validators` +fn default_n_validators() -> usize { + 300 +} + +// Based on Kusama cores +fn default_n_cores() -> usize { + 60 +} + // Default PoV size in KiB. 
fn default_pov_size() -> usize { - 5120 + 5 * 1024 } -// Default bandwidth in bytes +// Default bandwidth in bytes, based on stats from Kusama validators fn default_bandwidth() -> usize { - 52428800 + 42 * 1024 * 1024 +} + +// Default peer latency +fn default_peer_latency() -> Option { + Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }) } // Default connectivity percentage fn default_connectivity() -> usize { - 100 + 90 } // Default backing group size @@ -73,6 +78,7 @@ fn default_needed_approvals() -> usize { fn default_zeroth_delay_tranche_width() -> usize { 0 } + fn default_relay_vrf_modulo_samples() -> usize { 6 } @@ -87,11 +93,11 @@ fn default_no_show_slots() -> usize { /// The test input parameters #[derive(Clone, Debug, Serialize, Deserialize)] pub struct TestConfiguration { - /// The test objective - pub objective: TestObjective, /// Number of validators + #[serde(default = "default_n_validators")] pub n_validators: usize, /// Number of cores + #[serde(default = "default_n_cores")] pub n_cores: usize, /// The number of needed votes to approve a candidate. #[serde(default = "default_needed_approvals")] @@ -115,7 +121,7 @@ pub struct TestConfiguration { pub max_pov_size: usize, /// Randomly sampled pov_sizes #[serde(skip)] - pov_sizes: Vec, + pub pov_sizes: Vec, /// The amount of bandwidth remote validators have. #[serde(default = "default_bandwidth")] pub peer_bandwidth: usize, /// The amount of bandwidth our node has. #[serde(default = "default_bandwidth")] pub bandwidth: usize, /// Optional peer emulation latency (round trip time) wrt node under test - #[serde(default)] + #[serde(default = "default_peer_latency")] pub latency: Option, - /// Connectivity ratio, the percentage of peers we are not connected to, but ar part of - /// the topology. + /// Connectivity ratio, the percentage of peers in the topology that we are + /// connected to.
#[serde(default = "default_connectivity")] pub connectivity: usize, /// Number of blocks to run the test for pub num_blocks: usize, } -fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec { - (0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect() -} - -#[derive(Serialize, Deserialize)] -pub struct TestSequence { - #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))] - test_configurations: Vec, -} - -impl TestSequence { - pub fn into_vec(self) -> Vec { - self.test_configurations - .into_iter() - .map(|mut config| { - config.pov_sizes = - generate_pov_sizes(config.n_cores, config.min_pov_size, config.max_pov_size); - config - }) - .collect() - } -} - -impl TestSequence { - pub fn new_from_file(path: &Path) -> std::io::Result { - let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8"); - Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YA")) +impl Default for TestConfiguration { + fn default() -> Self { + Self { + n_validators: default_n_validators(), + n_cores: default_n_cores(), + needed_approvals: default_needed_approvals(), + zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(), + relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(), + n_delay_tranches: default_n_delay_tranches(), + no_show_slots: default_no_show_slots(), + max_validators_per_core: default_backing_group_size(), + min_pov_size: default_pov_size(), + max_pov_size: default_pov_size(), + pov_sizes: Default::default(), + peer_bandwidth: default_bandwidth(), + bandwidth: default_bandwidth(), + latency: default_peer_latency(), + connectivity: default_connectivity(), + num_blocks: Default::default(), + } } } -/// Helper struct for authority related state. -#[derive(Clone)] -pub struct TestAuthorities { - pub keyring: Keyring, - pub validator_public: Vec, - pub validator_authority_id: Vec, - pub validator_babe_id: Vec, - pub validator_assignment_id: Vec, - pub key_seeds: Vec, - pub peer_ids: Vec, - pub peer_id_to_authority: HashMap, -} - impl TestConfiguration { - #[allow(unused)] - pub fn write_to_disk(&self) { - // Serialize a slice of configurations - let yaml = serde_yaml::to_string(&TestSequence { test_configurations: vec![self.clone()] }) - .unwrap(); - std::fs::write("last_test.yaml", yaml).unwrap(); + pub fn generate_pov_sizes(&mut self) { + self.pov_sizes = generate_pov_sizes(self.n_cores, self.min_pov_size, self.max_pov_size); } pub fn pov_sizes(&self) -> &[usize] { @@ -239,6 +221,33 @@ impl TestConfiguration { } } +fn random_uniform_sample + From>(min_value: T, max_value: T) -> T { + Uniform::from(min_value.into()..=max_value.into()) + .sample(&mut thread_rng()) + .into() +} + +fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize { + random_uniform_sample(min_pov_size, max_pov_size) +} + +fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec { + (0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect() +} + +/// Helper struct for authority related state. +#[derive(Clone)] +pub struct TestAuthorities { + pub keyring: Keyring, + pub validator_public: Vec, + pub validator_authority_id: Vec, + pub validator_babe_id: Vec, + pub validator_assignment_id: Vec, + pub key_seeds: Vec, + pub peer_ids: Vec, + pub peer_id_to_authority: HashMap, +} + /// Sample latency (in milliseconds) from a normal distribution with parameters /// specified in `maybe_peer_latency`. 
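Two practical consequences of this configuration rework are worth illustrating. First, because most fields now carry `#[serde(default = "...")]`, a YAML configuration only has to spell out the fields without defaults (here only `num_blocks`); a minimal sketch, assuming `TestConfiguration` is in scope:

```rust
#[test]
fn omitted_fields_fall_back_to_kusama_based_defaults() {
    // `num_blocks` has no serde default, so it is the only required field.
    let config: TestConfiguration =
        serde_yaml::from_str("num_blocks: 3").expect("minimal config is valid");
    assert_eq!(config.n_validators, 300); // default_n_validators()
    assert_eq!(config.n_cores, 60); // default_n_cores()
    assert_eq!(config.connectivity, 90); // default_connectivity()
    assert_eq!(config.num_blocks, 3);
}
```

Second, the `random_latency` helper that follows samples a round-trip time from a normal distribution parameterized by `PeerLatency`; conceptually something like the sketch below (the clamp at zero is an assumption of this sketch, not necessarily what the helper does):

```rust
use rand::thread_rng;
use rand_distr::{Distribution, Normal};

fn sample_latency_ms(mean_latency_ms: f64, std_dev: f64) -> usize {
    let normal = Normal::new(mean_latency_ms, std_dev).expect("valid distribution parameters");
    // A normal sample can be negative; clamp at zero since latency cannot be.
    normal.sample(&mut thread_rng()).max(0.0) as usize
}
```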
pub fn random_latency(maybe_peer_latency: Option<&PeerLatency>) -> usize { diff --git a/polkadot/node/subsystem-bench/src/core/display.rs b/polkadot/node/subsystem-bench/src/lib/display.rs similarity index 89% rename from polkadot/node/subsystem-bench/src/core/display.rs rename to polkadot/node/subsystem-bench/src/lib/display.rs index 13a349382e2fd776f41685fb133b96f1058e7b43..b153d54a7c36f43bc110dde9443f2413a34d9af6 100644 --- a/polkadot/node/subsystem-bench/src/core/display.rs +++ b/polkadot/node/subsystem-bench/src/lib/display.rs @@ -19,7 +19,7 @@ //! //! Currently histogram buckets are skipped. -use crate::{TestConfiguration, LOG_TARGET}; +use crate::configuration::TestConfiguration; use colored::Colorize; use prometheus::{ proto::{MetricFamily, MetricType}, @@ -27,6 +27,8 @@ use prometheus::{ }; use std::fmt::Display; +const LOG_TARGET: &str = "subsystem-bench::display"; + #[derive(Default, Debug)] pub struct MetricCollection(Vec); @@ -85,6 +87,7 @@ impl Display for MetricCollection { Ok(()) } } + #[derive(Debug, Clone)] pub struct TestMetric { name: String, @@ -184,15 +187,16 @@ pub fn parse_metrics(registry: &Registry) -> MetricCollection { test_metrics.into() } -pub fn display_configuration(test_config: &TestConfiguration) { - gum::info!( - "[{}] {}, {}, {}, {}, {}", - format!("objective = {:?}", test_config.objective).green(), - format!("n_validators = {}", test_config.n_validators).blue(), - format!("n_cores = {}", test_config.n_cores).blue(), - format!("pov_size = {} - {}", test_config.min_pov_size, test_config.max_pov_size) - .bright_black(), - format!("connectivity = {}", test_config.connectivity).bright_black(), - format!("latency = {:?}", test_config.latency).bright_black(), - ); +impl Display for TestConfiguration { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}, {}, {}, {}, {}", + format!("n_validators = {}", self.n_validators).blue(), + format!("n_cores = {}", self.n_cores).blue(), + format!("pov_size = {} - {}", self.min_pov_size, self.max_pov_size).bright_black(), + format!("connectivity = {}", self.connectivity).bright_black(), + format!("latency = {:?}", self.latency).bright_black(), + ) + } } diff --git a/polkadot/node/subsystem-bench/src/core/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs similarity index 88% rename from polkadot/node/subsystem-bench/src/core/environment.rs rename to polkadot/node/subsystem-bench/src/lib/environment.rs index ca4c41cf45f99851023e1c6899b3d1f5e7913eb8..958ed50d0894b76ddb66978950971620dceb4aa0 100644 --- a/polkadot/node/subsystem-bench/src/core/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -17,13 +17,11 @@ //! 
Test environment implementation use crate::{ - core::{ - configuration::TestAuthorities, mock::AlwaysSupportsParachains, - network::NetworkEmulatorHandle, - }, - TestConfiguration, + configuration::{TestAuthorities, TestConfiguration}, + mock::AlwaysSupportsParachains, + network::NetworkEmulatorHandle, + usage::{BenchmarkUsage, ResourceUsage}, }; -use colored::Colorize; use core::time::Duration; use futures::{Future, FutureExt}; use polkadot_node_subsystem::{messages::AllMessages, Overseer, SpawnGlue, TimeoutExt}; @@ -33,7 +31,6 @@ use polkadot_node_subsystem_util::metrics::prometheus::{ }; use polkadot_overseer::{BlockInfo, Handle as OverseerHandle}; use sc_service::{SpawnTaskHandle, TaskManager}; -use serde::{Deserialize, Serialize}; use std::net::{Ipv4Addr, SocketAddr}; use tokio::runtime::Handle; @@ -204,6 +201,7 @@ impl TestEnvironment { overseer: Overseer, AlwaysSupportsParachains>, overseer_handle: OverseerHandle, authorities: TestAuthorities, + with_prometheus_endpoint: bool, ) -> Self { let metrics = TestEnvironmentMetrics::new(&dependencies.registry) .expect("Metrics need to be registered"); @@ -211,19 +209,21 @@ impl TestEnvironment { let spawn_handle = dependencies.task_manager.spawn_handle(); spawn_handle.spawn_blocking("overseer", "overseer", overseer.run().boxed()); - let registry_clone = dependencies.registry.clone(); - dependencies.task_manager.spawn_handle().spawn_blocking( - "prometheus", - "test-environment", - async move { - prometheus_endpoint::init_prometheus( - SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999), - registry_clone, - ) - .await - .unwrap(); - }, - ); + if with_prometheus_endpoint { + let registry_clone = dependencies.registry.clone(); + dependencies.task_manager.spawn_handle().spawn_blocking( + "prometheus", + "test-environment", + async move { + prometheus_endpoint::init_prometheus( + SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999), + registry_clone, + ) + .await + .unwrap(); + }, + ); + } TestEnvironment { runtime_handle: dependencies.runtime.handle().clone(), @@ -411,41 +411,3 @@ impl TestEnvironment { usage } } - -#[derive(Debug, Serialize, Deserialize)] -pub struct BenchmarkUsage { - benchmark_name: String, - network_usage: Vec, - cpu_usage: Vec, -} - -impl std::fmt::Display for BenchmarkUsage { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "\n{}\n\n{}\n{}\n\n{}\n{}\n", - self.benchmark_name.purple(), - format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), - self.network_usage - .iter() - .map(|v| v.to_string()) - .collect::>() - .join("\n"), - format!("{:<32}{:>12}{:>12}", "CPU usage in seconds", "total", "per block").blue(), - self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") - ) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct ResourceUsage { - resource_name: String, - total: f64, - per_block: f64, -} - -impl std::fmt::Display for ResourceUsage { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) - } -} diff --git a/polkadot/node/subsystem-bench/src/core/keyring.rs b/polkadot/node/subsystem-bench/src/lib/keyring.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/keyring.rs rename to polkadot/node/subsystem-bench/src/lib/keyring.rs diff --git a/polkadot/node/subsystem-bench/src/core/mod.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs similarity index 88% rename from 
polkadot/node/subsystem-bench/src/core/mod.rs rename to polkadot/node/subsystem-bench/src/lib/lib.rs index 764184c5b37779efb7dc22dead338cc05ee9b942..d06f2822a8958169a15f449e71251e3cc62eb9d6 100644 --- a/polkadot/node/subsystem-bench/src/core/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/lib.rs @@ -15,11 +15,14 @@ // along with Polkadot. If not, see . // The validator index that represent the node that is under test. -pub(crate) const NODE_UNDER_TEST: u32 = 0; +pub const NODE_UNDER_TEST: u32 = 0; -pub(crate) mod configuration; +pub mod approval; +pub mod availability; +pub mod configuration; pub(crate) mod display; pub(crate) mod environment; pub(crate) mod keyring; pub(crate) mod mock; pub(crate) mod network; +pub mod usage; diff --git a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs similarity index 98% rename from polkadot/node/subsystem-bench/src/core/mock/av_store.rs rename to polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 0a7725c91e0421e79a20b2ec77d8ade72ab05714..9064f17940f004fc123bc25f24ad5de1704e2d38 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -16,7 +16,7 @@ //! A generic av store subsystem mockup suitable to be used in benchmarks. -use crate::core::network::{HandleNetworkMessage, NetworkMessage}; +use crate::network::{HandleNetworkMessage, NetworkMessage}; use futures::{channel::oneshot, FutureExt}; use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ @@ -97,7 +97,7 @@ impl HandleNetworkMessage for NetworkAvailabilityState { outgoing_request .pending_response .send(response) - .expect("Response is always sent succesfully"); + .expect("Response is always sent successfully"); None }, _ => Some(NetworkMessage::RequestFromNode(peer, request)), diff --git a/polkadot/node/subsystem-bench/src/core/mock/chain_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/mock/chain_api.rs rename to polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs diff --git a/polkadot/node/subsystem-bench/src/core/mock/dummy.rs b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/mock/dummy.rs rename to polkadot/node/subsystem-bench/src/lib/mock/dummy.rs diff --git a/polkadot/node/subsystem-bench/src/core/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/core/mock/mod.rs rename to polkadot/node/subsystem-bench/src/lib/mock/mod.rs index 46fdeb196c011587dd081851e9e1f31863a0530e..6dda9a47d398f3e6e952cd054bce9769e8942e70 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs @@ -34,9 +34,10 @@ impl HeadSupportsParachains for AlwaysSupportsParachains { } // An orchestra with dummy subsystems +#[macro_export] macro_rules! dummy_builder { ($spawn_task_handle: ident, $metrics: ident) => {{ - use $crate::core::mock::dummy::*; + use $crate::mock::dummy::*; // Initialize a mock overseer. // All subsystem except approval_voting and approval_distribution are mock subsystems. @@ -72,7 +73,6 @@ macro_rules! 
dummy_builder { .spawner(SpawnGlue($spawn_task_handle)) }}; } -pub(crate) use dummy_builder; #[derive(Clone)] pub struct TestSyncOracle {} diff --git a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs rename to polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index 4682c7ec79ae3532858365b135636afefcb5400f..d598f6447d3d43ece5e1d0fa6364508403c032e9 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -17,7 +17,7 @@ //! Mocked `network-bridge` subsystems that uses a `NetworkInterface` to access //! the emulated network. -use crate::core::{ +use crate::{ configuration::TestAuthorities, network::{NetworkEmulatorHandle, NetworkInterfaceReceiver, NetworkMessage, RequestExt}, }; diff --git a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs rename to polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 0dd76efcbaf0de5cdbd456d0bff91658ad0ac737..53faf562f03c005238d5297c29117fd261b49eca 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -16,7 +16,7 @@ //! A generic runtime api subsystem mockup suitable to be used in benchmarks. -use crate::core::configuration::{TestAuthorities, TestConfiguration}; +use crate::configuration::{TestAuthorities, TestConfiguration}; use bitvec::prelude::BitVec; use futures::FutureExt; use itertools::Itertools; diff --git a/polkadot/node/subsystem-bench/src/core/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/network.rs rename to polkadot/node/subsystem-bench/src/lib/network.rs index e9124726d7c063f15eaf7b048b291b1b2115e897..1bc35a014ff5bf616bb02b0f8740679ef6b67489 100644 --- a/polkadot/node/subsystem-bench/src/core/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -33,7 +33,7 @@ // | // Subsystems under test -use crate::core::{ +use crate::{ configuration::{random_latency, TestAuthorities, TestConfiguration}, environment::TestEnvironmentDependencies, NODE_UNDER_TEST, @@ -219,7 +219,7 @@ impl Future for ProxiedRequest { Poll::Ready(response) => Poll::Ready(ProxiedResponse { sender: self.sender.take().expect("sender already used"), result: response - .expect("Response is always succesfully received.") + .expect("Response is always successfully received.") .result .map_err(|_| RequestFailure::Refused), }), @@ -761,7 +761,7 @@ pub fn new_network( gum::info!(target: LOG_TARGET, "{}",format!("connectivity {}%, latency {:?}", config.connectivity, config.latency).bright_black()); let metrics = - Metrics::new(&dependencies.registry).expect("Metrics always register succesfully"); + Metrics::new(&dependencies.registry).expect("Metrics always register successfully"); let mut validator_authority_id_mapping = HashMap::new(); // Create the channel from `peer` to `NetworkInterface` . 
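One subtlety in the mock module changes above: replacing the `pub(crate) use dummy_builder;` re-export with `#[macro_export]` hoists the macro to the crate root, which is why call sites now reach it from the crate root rather than through `core::mock`. A contrived illustration of that visibility rule (placeholder names, not the bench crate's code):

```rust
// Toy crate demonstrating `#[macro_export]`: the macro is exported at the
// crate root regardless of which module defines it.
mod mock {
    #[macro_export]
    macro_rules! dummy_builder {
        ($x:ident) => {
            $x + 1
        };
    }
}

mod consumer {
    // Invoked via the crate root, not via `crate::mock::dummy_builder`.
    pub fn call() -> u32 {
        let value = 41;
        crate::dummy_builder!(value)
    }
}

fn main() {
    assert_eq!(consumer::call(), 42);
}
```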
diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs new file mode 100644 index 0000000000000000000000000000000000000000..b83ef7d98d918e2c398f9f4b28558b1b93851051 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -0,0 +1,146 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test usage implementation + +use colored::Colorize; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[derive(Debug, Serialize, Deserialize)] +pub struct BenchmarkUsage { + pub benchmark_name: String, + pub network_usage: Vec, + pub cpu_usage: Vec, +} + +impl std::fmt::Display for BenchmarkUsage { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "\n{}\n\n{}\n{}\n\n{}\n{}\n", + self.benchmark_name.purple(), + format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), + self.network_usage + .iter() + .map(|v| v.to_string()) + .collect::>() + .join("\n"), + format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), + self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") + ) + } +} + +impl BenchmarkUsage { + pub fn average(usages: &[Self]) -> Self { + let all_network_usages: Vec<&ResourceUsage> = + usages.iter().flat_map(|v| &v.network_usage).collect(); + let all_cpu_usage: Vec<&ResourceUsage> = usages.iter().flat_map(|v| &v.cpu_usage).collect(); + + Self { + benchmark_name: usages.first().map(|v| v.benchmark_name.clone()).unwrap_or_default(), + network_usage: ResourceUsage::average_by_resource_name(&all_network_usages), + cpu_usage: ResourceUsage::average_by_resource_name(&all_cpu_usage), + } + } + + pub fn check_network_usage(&self, checks: &[ResourceUsageCheck]) -> Vec { + check_usage(&self.benchmark_name, &self.network_usage, checks) + } + + pub fn check_cpu_usage(&self, checks: &[ResourceUsageCheck]) -> Vec { + check_usage(&self.benchmark_name, &self.cpu_usage, checks) + } + + pub fn cpu_usage_diff(&self, other: &Self, resource_name: &str) -> Option { + let self_res = self.cpu_usage.iter().find(|v| v.resource_name == resource_name); + let other_res = other.cpu_usage.iter().find(|v| v.resource_name == resource_name); + + match (self_res, other_res) { + (Some(self_res), Some(other_res)) => Some(self_res.diff(other_res)), + _ => None, + } + } +} + +fn check_usage( + benchmark_name: &str, + usage: &[ResourceUsage], + checks: &[ResourceUsageCheck], +) -> Vec { + checks + .iter() + .filter_map(|check| { + check_resource_usage(usage, check) + .map(|message| format!("{}: {}", benchmark_name, message)) + }) + .collect() +} + +fn check_resource_usage( + usage: &[ResourceUsage], + (resource_name, base, precision): &ResourceUsageCheck, +) -> Option { + if let Some(usage) = usage.iter().find(|v| v.resource_name == *resource_name) { + let diff = (base - usage.per_block).abs() / base; + if diff < 
*precision { + None + } else { + Some(format!( + "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {}", + resource_name, base, precision, usage.per_block + )) + } + } else { + Some(format!("The resource `{}` is not found", resource_name)) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ResourceUsage { + pub resource_name: String, + pub total: f64, + pub per_block: f64, +} + +impl std::fmt::Display for ResourceUsage { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) + } +} + +impl ResourceUsage { + fn average_by_resource_name(usages: &[&Self]) -> Vec { + let mut by_name: HashMap> = Default::default(); + for usage in usages { + by_name.entry(usage.resource_name.clone()).or_default().push(usage); + } + let mut average = vec![]; + for (resource_name, values) in by_name { + let total = values.iter().map(|v| v.total).sum::() / values.len() as f64; + let per_block = values.iter().map(|v| v.per_block).sum::() / values.len() as f64; + average.push(Self { resource_name, total, per_block }); + } + average + } + + fn diff(&self, other: &Self) -> f64 { + (self.per_block - other.per_block).abs() / self.per_block + } +} + +type ResourceUsageCheck<'a> = (&'a str, f64, f64); diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index ffd05cca2e9306ca8cb311cc8c93e9a65c0732ff..54c8f7e2ade773f1df84fc8cef1825c266364633 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -28,6 +28,6 @@ sc-client-api = { path = "../../../substrate/client/api" } sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } smallvec = "1.8.0" substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } -thiserror = "1.0.48" +thiserror = { workspace = true } async-trait = "0.1.74" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 549e43a671d6b4f9bd59659647cdaff8a69125d4..f11291fd0ea8c157d72a24349e94196178529446 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -1112,6 +1112,9 @@ pub struct ProspectiveValidationDataRequest { /// is present in and the depths of that tree the candidate is present in. pub type FragmentTreeMembership = Vec<(Hash, Vec)>; +/// A collection of ancestor candidates of a parachain. +pub type Ancestors = HashSet; + /// Messages sent to the Prospective Parachains subsystem. #[derive(Debug)] pub enum ProspectiveParachainsMessage { @@ -1128,15 +1131,18 @@ pub enum ProspectiveParachainsMessage { /// has been backed. This requires that the candidate was successfully introduced in /// the past. CandidateBacked(ParaId, CandidateHash), - /// Get N backable candidate hashes along with their relay parents for the given parachain, - /// under the given relay-parent hash, which is a descendant of the given candidate hashes. + /// Try getting N backable candidate hashes along with their relay parents for the given + /// parachain, under the given relay-parent hash, which is a descendant of the given ancestors. + /// Timed out ancestors should not be included in the collection. /// N should represent the number of scheduled cores of this ParaId. - /// Returns `None` on the channel if no such candidate exists. 
+ /// A timed out ancestor frees the cores of all of its descendants, so if there's a hole in the + /// supplied ancestor path, we'll get candidates that backfill those timed out slots first. It + /// may also return less/no candidates, if there aren't enough backable candidates recorded. GetBackableCandidates( Hash, ParaId, u32, - Vec, + Ancestors, oneshot::Sender>, ), /// Get the hypothetical frontier membership of candidates with the given properties diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7f6183076101b4474e8059435b42a69b108fbb05..4039fc9127da95e19e0aca427740793c39131ab9 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -26,13 +26,13 @@ use polkadot_primitives::{ PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; -use sc_client_api::HeaderBackend; +use sc_client_api::{AuxStore, HeaderBackend}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; use sp_authority_discovery::AuthorityDiscoveryApi; -use sp_blockchain::Info; +use sp_blockchain::{BlockStatus, Info}; use sp_consensus_babe::{BabeApi, Epoch}; -use sp_runtime::traits::{Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{collections::BTreeMap, sync::Arc}; /// Offers header utilities. @@ -595,3 +595,61 @@ where self.client.runtime_api().approval_voting_params(at) } } + +impl HeaderBackend for DefaultSubsystemClient +where + Client: HeaderBackend, + Block: sp_runtime::traits::Block, +{ + fn header( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result> { + self.client.header(hash) + } + + fn info(&self) -> Info { + self.client.info() + } + + fn status(&self, hash: Block::Hash) -> sc_client_api::blockchain::Result { + self.client.status(hash) + } + + fn number( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + self.client.number(hash) + } + + fn hash( + &self, + number: NumberFor, + ) -> sc_client_api::blockchain::Result> { + self.client.hash(number) + } +} + +impl AuxStore for DefaultSubsystemClient +where + Client: AuxStore, +{ + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + self.client.insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + self.client.get_aux(key) + } +} diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index b2a643d92f69698e7de5017ae3f8aa4f5504a644..a668f8de76a0bdf15b1e4498924279b5fc467d3e 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -18,7 +18,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ parking_lot = "0.12.1" pin-project = "1.0.9" rand = "0.8.5" -thiserror = "1.0.48" +thiserror = { workspace = true } fatality = "0.0.6" gum = { package = "tracing-gum", path = "../gum" } derive_more = "0.99.17" diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 47a754892ed71be575f5d64957906d94e1c89d4c..e7892abcd87bf09140b53f5f123798b2ccce7ac4 100644 --- a/polkadot/node/test/service/Cargo.toml +++ 
b/polkadot/node/test/service/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.21" hex = "0.4.3" gum = { package = "tracing-gum", path = "../../gum" } rand = "0.8.5" -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } tempfile = "3.2.0" tokio = "1.24.2" diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index 0295090b9521551533d012addeda064595dbeac3..f14fa9fde58b4426690cd1bd0e16129f095c34ec 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -19,7 +19,9 @@ use babe_primitives::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; use pallet_staking::Forcing; -use polkadot_primitives::{AccountId, AssignmentId, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE}; +use polkadot_primitives::{ + vstaging::SchedulerParams, AccountId, AssignmentId, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE, +}; use polkadot_service::chain_spec::{get_account_id_from_seed, get_from_seed, Extensions}; use polkadot_test_runtime::BABE_GENESIS_EPOCH_CONFIG; use sc_chain_spec::{ChainSpec, ChainType}; @@ -165,10 +167,14 @@ fn polkadot_testnet_genesis( max_code_size: MAX_CODE_SIZE, max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, - group_rotation_frequency: 20, - paras_availability_period: 4, no_show_slots: 10, minimum_validation_upgrade_delay: 5, + max_downward_message_size: 1024, + scheduler_params: SchedulerParams { + group_rotation_frequency: 20, + paras_availability_period: 4, + ..Default::default() + }, ..Default::default() }, } diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index a3af977ab3e4a8a68fee7b4c269720ef1a2e8af7..ca904bae28db2dcc2f0a2e50e1cf92528a135003 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -44,8 +44,8 @@ use sc_network::{ }; use sc_service::{ config::{ - DatabaseSource, KeystoreConfig, MultiaddrWithPeerId, WasmExecutionMethod, - WasmtimeInstantiationStrategy, + DatabaseSource, KeystoreConfig, MultiaddrWithPeerId, RpcBatchRequestConfig, + WasmExecutionMethod, WasmtimeInstantiationStrategy, }, BasePath, BlocksPruning, Configuration, Role, RpcHandlers, TaskManager, }; @@ -186,6 +186,7 @@ pub fn node_config( rpc_max_subs_per_conn: Default::default(), rpc_port: 9944, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index ec0521a592dc52ef81b619725e3c7d44cb96b148..fa99490a997434eedee977121f13d8dcc1a9b5a1 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -19,7 +19,7 @@ futures-util = "0.3.30" lazy_static = "1.4.0" parity-scale-codec = { version = "3.6.1", features = ["derive"] } reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false } -thiserror = "1.0.48" +thiserror = { workspace = true } gum = { package = "tracing-gum", path = "../gum" } -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0.113" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 843c4fe3e9527508dbee77eb72ea694e1ea18b57..d8c3cea7ad8b16062f346c12842d5f652936797c 100644 --- 
a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -24,7 +24,7 @@ derive_more = "0.99.11" bounded-collections = { version = "0.2.0", default-features = false, features = ["serde"] } # all optional crates. -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } [features] default = ["std"] diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index fa3e949d691beb8ffafbfe4dc99c429632c91fac..e63fb621c7880c03fe0282a5500df69d20528f99 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -14,7 +14,8 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "se hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] } +log = { workspace = true, default-features = false } +serde = { features = ["alloc", "derive"], workspace = true } application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } inherents = { package = "sp-inherents", path = "../../substrate/primitives/inherents", default-features = false } @@ -38,6 +39,7 @@ std = [ "application-crypto/std", "bitvec/std", "inherents/std", + "log/std", "parity-scale-codec/std", "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs index 112a529f62b0570e9270d1f09e397e6b9dc358b0..1e19f3b23fec9b9889d5976380f52ab6b66072b4 100644 --- a/polkadot/primitives/src/v6/executor_params.rs +++ b/polkadot/primitives/src/v6/executor_params.rs @@ -159,7 +159,9 @@ impl sp_std::fmt::LowerHex for ExecutorParamsHash { // into individual fields of the structure. Thus, complex migrations shall be avoided when adding // new entries and removing old ones. At the moment, there's no mandatory parameters defined. If // they show up, they must be clearly documented as mandatory ones. -#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize)] +#[derive( + Clone, Debug, Default, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize, +)] pub struct ExecutorParams(Vec); impl ExecutorParams { @@ -334,9 +336,3 @@ impl From<&[ExecutorParam]> for ExecutorParams { ExecutorParams(arr.to_vec()) } } - -impl Default for ExecutorParams { - fn default() -> Self { - ExecutorParams(vec![]) - } -} diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 4938d20d2d1bd5c4aad82a99704eb715bbc66197..cab34deeb503b5a5606fef1934c0ef4238b03e31 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -16,7 +16,7 @@ //! `V6` Primitives. -use bitvec::vec::BitVec; +use bitvec::{field::BitField, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_std::{ @@ -72,6 +72,7 @@ pub use metrics::{ /// The key type ID for a collator key. 
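The `ExecutorParams` change above is behavior-preserving: deriving `Default` on a newtype over a `Vec` yields an empty vector, exactly what the removed manual impl returned. A tiny stand-in demonstrating the equivalence (illustrative type, not the real `ExecutorParams`):

```rust
#[derive(Default, Debug, PartialEq)]
struct ExecutorParamsLike(Vec<u8>); // stand-in newtype over a Vec

fn main() {
    // The derived impl defaults the inner Vec to empty, matching the old
    // hand-written `ExecutorParams(vec![])`.
    assert_eq!(ExecutorParamsLike::default(), ExecutorParamsLike(vec![]));
}
```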
pub const COLLATOR_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"coll"); +const LOG_TARGET: &str = "runtime::primitives"; mod collator_app { use application_crypto::{app_crypto, sr25519}; @@ -532,18 +533,6 @@ impl CandidateReceipt { } } -/// All data pertaining to the execution of a para candidate. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] -pub struct FullCandidateReceipt { - /// The inner candidate receipt. - pub inner: CandidateReceipt, - /// The validation data derived from the relay-chain state at that - /// point. The hash of the persisted validation data should - /// match the `persisted_validation_data_hash` in the descriptor - /// of the receipt. - pub validation_data: PersistedValidationData, -} - /// A candidate-receipt with commitments directly included. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Hash))] @@ -706,19 +695,50 @@ pub type UncheckedSignedAvailabilityBitfields = Vec { /// The candidate referred to. - pub candidate: CommittedCandidateReceipt, + candidate: CommittedCandidateReceipt, /// The validity votes themselves, expressed as signatures. - pub validity_votes: Vec, - /// The indices of the validators within the group, expressed as a bitfield. - pub validator_indices: BitVec, + validity_votes: Vec, + /// The indices of the validators within the group, expressed as a bitfield. May be extended + /// beyond the backing group size to contain the assigned core index, if ElasticScalingMVP is + /// enabled. + validator_indices: BitVec, } impl BackedCandidate { - /// Get a reference to the descriptor of the para. + /// Constructor + pub fn new( + candidate: CommittedCandidateReceipt, + validity_votes: Vec, + validator_indices: BitVec, + core_index: Option, + ) -> Self { + let mut instance = Self { candidate, validity_votes, validator_indices }; + if let Some(core_index) = core_index { + instance.inject_core_index(core_index); + } + instance + } + + /// Get a reference to the descriptor of the candidate. pub fn descriptor(&self) -> &CandidateDescriptor { &self.candidate.descriptor } + /// Get a reference to the committed candidate receipt of the candidate. + pub fn candidate(&self) -> &CommittedCandidateReceipt { + &self.candidate + } + + /// Get a reference to the validity votes of the candidate. + pub fn validity_votes(&self) -> &[ValidityAttestation] { + &self.validity_votes + } + + /// Get a mutable reference to the validity votes of the candidate. + pub fn validity_votes_mut(&mut self) -> &mut Vec { + &mut self.validity_votes + } + /// Compute this candidate's hash. pub fn hash(&self) -> CandidateHash where @@ -734,6 +754,48 @@ impl BackedCandidate { { self.candidate.to_plain() } + + /// Get a copy of the validator indices and the assumed core index, if any. + pub fn validator_indices_and_core_index( + &self, + core_index_enabled: bool, + ) -> (&BitSlice, Option) { + // This flag tells us if the block producers must enable the Elastic Scaling MVP hack. + // It extends `BackedCandidate::validator_indices` to store an 8-bit core index. + if core_index_enabled { + let core_idx_offset = self.validator_indices.len().saturating_sub(8); + if core_idx_offset > 0 { + let (validator_indices_slice, core_idx_slice) = + self.validator_indices.split_at(core_idx_offset); + return ( + validator_indices_slice, + Some(CoreIndex(core_idx_slice.load::() as u32)), + ); + } + } + + (&self.validator_indices, None) + } + + /// Inject a core index in the validator_indices bitvec.
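The packing scheme deserves a concrete example: the core index travels as eight extra `Lsb0` bits appended after the validator bits, and the getter above recovers it by splitting eight bits off the tail, but only when a non-empty prefix remains. A standalone round-trip sketch of just the bit manipulation, mirroring this getter and the `inject_core_index` helper below (not the crate's code):

```rust
use bitvec::{bitvec, field::BitField, order::Lsb0, slice::BitSlice, vec::BitVec};

fn pack(mut validator_bits: BitVec<u8, Lsb0>, core_index: u8) -> BitVec<u8, Lsb0> {
    // Append the core index as one extra byte worth of bits.
    validator_bits.extend(BitVec::<u8, Lsb0>::from_vec(vec![core_index]));
    validator_bits
}

fn unpack(bits: &BitVec<u8, Lsb0>) -> (&BitSlice<u8, Lsb0>, Option<u32>) {
    let offset = bits.len().saturating_sub(8);
    if offset > 0 {
        let (validators, core) = bits.split_at(offset);
        return (validators, Some(core.load::<u8>() as u32));
    }
    // Too short to carry a core index: treat everything as validator bits.
    (bits.as_bitslice(), None)
}

#[test]
fn core_index_round_trips() {
    let packed = pack(bitvec![u8, Lsb0; 0, 1, 0, 1], 10);
    let (validators, core) = unpack(&packed);
    assert_eq!(validators, bitvec![u8, Lsb0; 0, 1, 0, 1].as_bitslice());
    assert_eq!(core, Some(10));
}
```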
+ fn inject_core_index(&mut self, core_index: CoreIndex) { + let core_index_to_inject: BitVec = + BitVec::from_vec(vec![core_index.0 as u8]); + self.validator_indices.extend(core_index_to_inject); + } + + /// Update the validator indices and core index in the candidate. + pub fn set_validator_indices_and_core_index( + &mut self, + new_indices: BitVec, + maybe_core_index: Option, + ) { + self.validator_indices = new_indices; + + if let Some(core_index) = maybe_core_index { + self.inject_core_index(core_index); + } + } } /// Verify the backing of the given candidate. @@ -746,43 +808,65 @@ impl BackedCandidate { /// /// Returns either an error, indicating that one of the signatures was invalid or that the index /// was out-of-bounds, or the number of signatures checked. -pub fn check_candidate_backing + Clone + Encode>( - backed: &BackedCandidate, +pub fn check_candidate_backing + Clone + Encode + core::fmt::Debug>( + candidate_hash: CandidateHash, + validity_votes: &[ValidityAttestation], + validator_indices: &BitSlice, signing_context: &SigningContext, group_len: usize, validator_lookup: impl Fn(usize) -> Option, ) -> Result { - if backed.validator_indices.len() != group_len { + if validator_indices.len() != group_len { + log::debug!( + target: LOG_TARGET, + "Check candidate backing: indices mismatch: group_len = {} , indices_len = {}", + group_len, + validator_indices.len(), + ); return Err(()) } - if backed.validity_votes.len() > group_len { + if validity_votes.len() > group_len { + log::debug!( + target: LOG_TARGET, + "Check candidate backing: Too many votes, expected: {}, found: {}", + group_len, + validity_votes.len(), + ); return Err(()) } - // this is known, even in runtime, to be blake2-256. - let hash = backed.candidate.hash(); - let mut signed = 0; - for ((val_in_group_idx, _), attestation) in backed - .validator_indices + for ((val_in_group_idx, _), attestation) in validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed.validity_votes.iter()) + .zip(validity_votes.iter()) { let validator_id = validator_lookup(val_in_group_idx).ok_or(())?; - let payload = attestation.signed_payload(hash, signing_context); + let payload = attestation.signed_payload(candidate_hash, signing_context); let sig = attestation.signature(); if sig.verify(&payload[..], &validator_id) { signed += 1; } else { + log::debug!( + target: LOG_TARGET, + "Check candidate backing: Invalid signature. 
validator_id = {:?}, validator_index = {} ", + validator_id, + val_in_group_idx, + ); return Err(()) } } - if signed != backed.validity_votes.len() { + if signed != validity_votes.len() { + log::error!( + target: LOG_TARGET, + "Check candidate backing: Too many signatures, expected = {}, found = {}", + validity_votes.len(), + signed, + ); return Err(()) } @@ -1859,6 +1943,34 @@ pub enum PvfExecKind { #[cfg(test)] mod tests { use super::*; + use bitvec::bitvec; + use primitives::sr25519; + + pub fn dummy_committed_candidate_receipt() -> CommittedCandidateReceipt { + let zeros = Hash::zero(); + + CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id: 0.into(), + relay_parent: zeros, + collator: CollatorId::from(sr25519::Public::from_raw([0; 32])), + persisted_validation_data_hash: zeros, + pov_hash: zeros, + erasure_root: zeros, + signature: CollatorSignature::from(sr25519::Signature([0u8; 64])), + para_head: zeros, + validation_code_hash: ValidationCode(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]).hash(), + }, + commitments: CandidateCommitments { + head_data: HeadData(vec![]), + upward_messages: vec![].try_into().expect("empty vec fits within bounds"), + new_validation_code: None, + horizontal_messages: vec![].try_into().expect("empty vec fits within bounds"), + processed_downward_messages: 0, + hrmp_watermark: 0_u32, + }, + } + } #[test] fn group_rotation_info_calculations() { @@ -1933,4 +2045,73 @@ mod tests { assert!(zero_b.leading_zeros() >= zero_u.leading_zeros()); } + + #[test] + fn test_backed_candidate_injected_core_index() { + let initial_validator_indices = bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1]; + let mut candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + initial_validator_indices.clone(), + None, + ); + + // No core index supplied, ElasticScalingMVP is off. + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(false); + assert_eq!(validator_indices, initial_validator_indices.as_bitslice()); + assert!(core_index.is_none()); + + // No core index supplied, ElasticScalingMVP is on. Still, decoding will be ok if backing + // group size is <= 8, to give a chance to parachains that don't have multiple cores + // assigned. + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true); + assert_eq!(validator_indices, initial_validator_indices.as_bitslice()); + assert!(core_index.is_none()); + + let encoded_validator_indices = candidate.validator_indices.clone(); + candidate.set_validator_indices_and_core_index(validator_indices.into(), core_index); + assert_eq!(candidate.validator_indices, encoded_validator_indices); + + // No core index supplied, ElasticScalingMVP is on. Decoding is corrupted if backing group + // size larger than 8. + let candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1, 0, 1, 0, 1, 0], + None, + ); + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true); + assert_eq!(validator_indices, bitvec![u8, bitvec::order::Lsb0; 0].as_bitslice()); + assert!(core_index.is_some()); + + // Core index supplied, ElasticScalingMVP is off. Core index will be treated as normal + // validator indices. Runtime will check against this. 
+        let candidate = BackedCandidate::new(
+            dummy_committed_candidate_receipt(),
+            vec![],
+            bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1],
+            Some(CoreIndex(10)),
+        );
+        let (validator_indices, core_index) = candidate.validator_indices_and_core_index(false);
+        assert_eq!(
+            validator_indices,
+            bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0]
+        );
+        assert!(core_index.is_none());
+
+        // Core index supplied, ElasticScalingMVP is on.
+        let mut candidate = BackedCandidate::new(
+            dummy_committed_candidate_receipt(),
+            vec![],
+            bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1],
+            Some(CoreIndex(10)),
+        );
+        let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true);
+        assert_eq!(validator_indices, bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1]);
+        assert_eq!(core_index, Some(CoreIndex(10)));
+
+        let encoded_validator_indices = candidate.validator_indices.clone();
+        candidate.set_validator_indices_and_core_index(validator_indices.into(), core_index);
+        assert_eq!(candidate.validator_indices, encoded_validator_indices);
+    }
 }
diff --git a/polkadot/primitives/src/v6/slashing.rs b/polkadot/primitives/src/v6/slashing.rs
index efffb932cdc0d997e08f9d195e2c42b857a7c8f5..bcd7d0c2fc4455e2ebf01fab027dbf0618f63dba 100644
--- a/polkadot/primitives/src/v6/slashing.rs
+++ b/polkadot/primitives/src/v6/slashing.rs
@@ -54,7 +54,7 @@ impl DisputesTimeSlot {
 /// is required to identify and verify it.
 #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug)]
 pub struct DisputeProof {
-    /// Time slot when the dispute occured.
+    /// Time slot when the dispute occurred.
     pub time_slot: DisputesTimeSlot,
     /// The dispute outcome.
    pub kind: SlashingOffenceKind,
diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs
index 630bcf8679ad3046ce734042a1557058ea440110..bd2a5106c44de5895fb327957161c84f13a983b0 100644
--- a/polkadot/primitives/src/vstaging/mod.rs
+++ b/polkadot/primitives/src/vstaging/mod.rs
@@ -23,6 +23,7 @@ use sp_std::prelude::*;
 use parity_scale_codec::{Decode, Encode};
 use primitives::RuntimeDebug;
 use scale_info::TypeInfo;
+use sp_arithmetic::Perbill;
 
 /// Approval voting configuration parameters
 #[derive(
@@ -50,6 +51,81 @@ impl Default for ApprovalVotingParams {
     }
 }
 
+/// Scheduler configuration parameters. All coretime/ondemand parameters are here.
+#[derive(
+    RuntimeDebug,
+    Copy,
+    Clone,
+    PartialEq,
+    Encode,
+    Decode,
+    TypeInfo,
+    serde::Serialize,
+    serde::Deserialize,
+)]
+pub struct SchedulerParams<BlockNumber> {
+    /// How often parachain groups should be rotated across parachains.
+    ///
+    /// Must be non-zero.
+    pub group_rotation_frequency: BlockNumber,
+    /// Availability timeout for a block on a core, measured in blocks.
+    ///
+    /// This is the maximum amount of blocks after a core became occupied that validators have time
+    /// to make the block available.
+    ///
+    /// This value only has effect on group rotations. If backers backed something at the end of
+    /// their rotation, the occupied core affects the backing group that comes afterwards. We limit
+    /// the effect one backing group can have on the next to `paras_availability_period` blocks.
+    ///
+    /// Within a group rotation there is no timeout as backers are only affecting themselves.
+    ///
+    /// Must be at least 1. With a value of 1, the previous group will not be able to negatively
+    /// affect the following group at the expense of a tight availability timeline at group
+    /// rotation boundaries.
+    pub paras_availability_period: BlockNumber,
+    /// The maximum number of validators to have per core.
+    ///
+    /// `None` means no maximum.
+    pub max_validators_per_core: Option<u32>,
+    /// The amount of blocks ahead to schedule paras.
+    pub lookahead: u32,
+    /// How many cores are managed by the coretime chain.
+    pub num_cores: u32,
+    /// The max number of times a claim can time out in availability.
+    pub max_availability_timeouts: u32,
+    /// The maximum queue size of the pay as you go module.
+    pub on_demand_queue_max_size: u32,
+    /// The target utilization of the spot price queue in percentages.
+    pub on_demand_target_queue_utilization: Perbill,
+    /// How quickly the fee rises in reaction to increased utilization.
+    /// The lower the number the slower the increase.
+    pub on_demand_fee_variability: Perbill,
+    /// The minimum amount needed to claim a slot in the spot pricing queue.
+    pub on_demand_base_fee: Balance,
+    /// The number of blocks a claim stays in the scheduler's claimqueue before getting cleared.
+    /// This number should go reasonably higher than the number of blocks in the async backing
+    /// lookahead.
+    pub ttl: BlockNumber,
+}
+
+impl<BlockNumber: Default + From<u32>> Default for SchedulerParams<BlockNumber> {
+    fn default() -> Self {
+        Self {
+            group_rotation_frequency: 1u32.into(),
+            paras_availability_period: 1u32.into(),
+            max_validators_per_core: Default::default(),
+            lookahead: 1,
+            num_cores: Default::default(),
+            max_availability_timeouts: Default::default(),
+            on_demand_queue_max_size: ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE,
+            on_demand_target_queue_utilization: Perbill::from_percent(25),
+            on_demand_fee_variability: Perbill::from_percent(3),
+            on_demand_base_fee: 10_000_000u128,
+            ttl: 5u32.into(),
+        }
+    }
+}
+
 use bitvec::vec::BitVec;
 
 /// Bit indices in the `HostConfiguration.node_features` that correspond to different node features.
@@ -64,9 +140,13 @@ pub mod node_features {
     /// Tells if tranch0 assignments could be sent in a single certificate.
     /// Reserved for: ``
     EnableAssignmentsV2 = 0,
+    /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits.
+    /// The value stored there represents the assumed core index where the candidates
+    /// are backed. This is needed for the elastic scaling MVP.
+    ElasticScalingMVP = 1,
     /// First unassigned feature bit.
     /// Every time a new feature flag is assigned it should take this value.
     /// and this should be incremented.
-    FirstUnassigned = 1,
+    FirstUnassigned = 2,
     }
 }
diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
index e0984bd58d1dd8ea22b145a15b4a3bab8779de80..39a317a07c847aa734e78f85de524f5541c8ddf6 100644
--- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
+++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
@@ -125,6 +125,14 @@ execution request:
    reason, which may or may not be independent of the candidate or PVF.
 5. **Internal errors:** See "Internal Errors" section. In this case, after the
    retry we abstain from voting.
+6. **RuntimeConstruction** error. The precheck handles the general case of a
+   wrong artifact but doesn't guarantee its consistency between the preparation
+   and the execution. Something may happen to the artifact between its
+   preparation and its execution (e.g. the artifact was corrupted on disk, or a
+   dirty node upgrade left the prepare worker with a wasmtime version different
+   from the execute worker's). We treat such an error as possibly transient,
+   caused by local issues, and retry one time.
 
 ### Preparation timeouts
diff --git a/polkadot/roadmap/implementers-guide/src/types/README.md b/polkadot/roadmap/implementers-guide/src/types/README.md
index 87092bf3a005353b09d375b89f599c2918295ef5..5fd3050f94906f04e876e9018698edb1a5218e2f 100644
--- a/polkadot/roadmap/implementers-guide/src/types/README.md
+++ b/polkadot/roadmap/implementers-guide/src/types/README.md
@@ -55,17 +55,6 @@ digraph {
     CandidateCommitmentsHash [label = "Hash", shape="doublecircle", fill="gray90"]
     CandidateCommitmentsHash -> CandidateCommitments:name
 
-    FullCandidateReceipt [label = <
-        <table>
-            <tr><td border="0" colspan="2" port="name">FullCandidateReceipt&lt;H = Hash, N = BlockNumber&gt;</td></tr>
-            <tr><td align="left">inner</td><td port="inner">CandidateReceipt&lt;H&gt;</td></tr>
-            <tr><td align="left">validation_data</td><td port="validation_data">ValidationData&lt;N&gt;</td></tr>
-        </table>
-    >]
-
-    FullCandidateReceipt:inner -> CandidateReceipt:name
-    FullCandidateReceipt:validation_data -> ValidationData:name
-
     CommittedCandidateReceipt [label = <
diff --git a/polkadot/roadmap/implementers-guide/src/types/candidate.md b/polkadot/roadmap/implementers-guide/src/types/candidate.md
index 00176229e5a4356a9f88ae91c16eb46617b47216..399d7854ac4a057f49435702adb1a815c960c593 100644
--- a/polkadot/roadmap/implementers-guide/src/types/candidate.md
+++ b/polkadot/roadmap/implementers-guide/src/types/candidate.md
@@ -20,12 +20,8 @@ struct ParaId(u32);
 
 ## Candidate Receipt
 
-Much info in a [`FullCandidateReceipt`](#full-candidate-receipt) is duplicated from the relay-chain state. When the
-corresponding relay-chain state is considered widely available, the Candidate Receipt should be favored over the
-`FullCandidateReceipt`.
-
-Examples of situations where the state is readily available includes within the scope of work done by subsystems working
-on a given relay-parent, or within the logic of the runtime importing a backed candidate.
+Compact representation of the result of a validation. This is what validators
+receive from collators, together with the PoV.
 
 ```rust
 /// A candidate-receipt.
@@ -37,24 +33,6 @@ struct CandidateReceipt {
 }
 ```
 
-## Full Candidate Receipt
-
-This is the full receipt type. The `PersistedValidationData` are technically redundant with the `inner.relay_parent`,
-which uniquely describes the block in the blockchain from whose state these values are derived. The
-[`CandidateReceipt`](#candidate-receipt) variant is often used instead for this reason.
-
-However, the Full Candidate Receipt type is useful as a means of avoiding the implicit dependency on availability of old
-blockchain state. In situations such as availability and approval, having the full description of the candidate within a
-self-contained struct is convenient.
-
-```rust
-/// All data pertaining to the execution of a para candidate.
-struct FullCandidateReceipt {
-    inner: CandidateReceipt,
-    validation_data: PeristedValidationData,
-}
-```
-
 ## Committed Candidate Receipt
 
 This is a variant of the candidate receipt which includes the commitments of the candidate receipt alongside the
diff --git a/polkadot/roadmap/implementers-guide/src/types/runtime.md b/polkadot/roadmap/implementers-guide/src/types/runtime.md
index 4b97409f8df3e4f3581161ef47713bfdfbcb9654..1dfabd96db7cdb929c416201eeeb233af11f5dbb 100644
--- a/polkadot/roadmap/implementers-guide/src/types/runtime.md
+++ b/polkadot/roadmap/implementers-guide/src/types/runtime.md
@@ -4,106 +4,24 @@ Types used within the runtime exclusively and pervasively.
 
 ## Host Configuration
 
-The internal-to-runtime configuration of the parachain host. This is expected to be altered only by governance procedures.
-
-```rust
-struct HostConfiguration {
-    /// The minimum period, in blocks, between which parachains can update their validation code.
-    pub validation_upgrade_cooldown: BlockNumber,
-    /// The delay, in blocks, before a validation upgrade is applied.
-    pub validation_upgrade_delay: BlockNumber,
-    /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes
-    /// have concluded.
-    pub code_retention_period: BlockNumber,
-    /// The maximum validation code size, in bytes.
-    pub max_code_size: u32,
-    /// The maximum head-data size, in bytes.
-    pub max_head_data_size: u32,
-    /// The amount of availability cores to dedicate to parathreads (on-demand parachains).
-    pub parathread_cores: u32,
-    /// The number of retries that a parathread (on-demand parachain) author has to submit their block.
-    pub parathread_retries: u32,
-    /// How often parachain groups should be rotated across parachains.
-    pub group_rotation_frequency: BlockNumber,
-    /// The availability period, in blocks, for parachains. This is the amount of blocks
-    /// after inclusion that validators have to make the block available and signal its availability to
-    /// the chain. Must be at least 1.
-    pub chain_availability_period: BlockNumber,
-    /// The availability period, in blocks, for parathreads (on-demand parachains). Same as the `chain_availability_period`,
-    /// but a differing timeout due to differing requirements. Must be at least 1.
-    pub thread_availability_period: BlockNumber,
-    /// The amount of blocks ahead to schedule on-demand parachains.
-    pub scheduling_lookahead: u32,
-    /// The maximum number of validators to have per core. `None` means no maximum.
-    pub max_validators_per_core: Option<u32>,
-    /// The maximum number of validators to use for parachains, in total. `None` means no maximum.
-    pub max_validators: Option<u32>,
-    /// The amount of sessions to keep for disputes.
-    pub dispute_period: SessionIndex,
-    /// How long after dispute conclusion to accept statements.
-    pub dispute_post_conclusion_acceptance_period: BlockNumber,
-    /// The maximum number of dispute spam slots
-    pub dispute_max_spam_slots: u32,
-    /// The amount of consensus slots that must pass between submitting an assignment and
-    /// submitting an approval vote before a validator is considered a no-show.
-    /// Must be at least 1.
-    pub no_show_slots: u32,
-    /// The number of delay tranches in total.
-    pub n_delay_tranches: u32,
-    /// The width of the zeroth delay tranche for approval assignments. This many delay tranches
-    /// beyond 0 are all consolidated to form a wide 0 tranche.
-    pub zeroth_delay_tranche_width: u32,
-    /// The number of validators needed to approve a block.
-    pub needed_approvals: u32,
-    /// The number of samples to use in `RelayVRFModulo` or `RelayVRFModuloCompact` approval assignment criterions.
-    pub relay_vrf_modulo_samples: u32,
-    /// Total number of individual messages allowed in the parachain -> relay-chain message queue.
-    pub max_upward_queue_count: u32,
-    /// Total size of messages allowed in the parachain -> relay-chain message queue before which
-    /// no further messages may be added to it. If it exceeds this then the queue may contain only
-    /// a single message.
-    pub max_upward_queue_size: u32,
-    /// The maximum size of an upward message that can be sent by a candidate.
-    ///
-    /// This parameter affects the upper bound of size of `CandidateCommitments`.
-    pub max_upward_message_size: u32,
-    /// The maximum number of messages that a candidate can contain.
-    ///
-    /// This parameter affects the upper bound of size of `CandidateCommitments`.
-    pub max_upward_message_num_per_candidate: u32,
-    /// The maximum size of a message that can be put in a downward message queue.
-    ///
-    /// Since we require receiving at least one DMP message the obvious upper bound of the size is
-    /// the PoV size. Of course, there is a lot of other different things that a parachain may
-    /// decide to do with its PoV so this value in practice will be picked as a fraction of the PoV
-    /// size.
-    pub max_downward_message_size: u32,
-    /// The deposit that the sender should provide for opening an HRMP channel.
-    pub hrmp_sender_deposit: u32,
-    /// The deposit that the recipient should provide for accepting opening an HRMP channel.
-    pub hrmp_recipient_deposit: u32,
-    /// The maximum number of messages allowed in an HRMP channel at once.
-    pub hrmp_channel_max_capacity: u32,
-    /// The maximum total size of messages in bytes allowed in an HRMP channel at once.
-    pub hrmp_channel_max_total_size: u32,
-    /// The maximum number of inbound HRMP channels a parachain is allowed to accept.
-    pub hrmp_max_parachain_inbound_channels: u32,
-    /// The maximum number of inbound HRMP channels a parathread (on-demand parachain) is allowed to accept.
-    pub hrmp_max_parathread_inbound_channels: u32,
-    /// The maximum size of a message that could ever be put into an HRMP channel.
-    ///
-    /// This parameter affects the upper bound of size of `CandidateCommitments`.
-    pub hrmp_channel_max_message_size: u32,
-    /// The maximum number of outbound HRMP channels a parachain is allowed to open.
-    pub hrmp_max_parachain_outbound_channels: u32,
-    /// The maximum number of outbound HRMP channels a parathread (on-demand parachain) is allowed to open.
-    pub hrmp_max_parathread_outbound_channels: u32,
-    /// The maximum number of outbound HRMP messages can be sent by a candidate.
-    ///
-    /// This parameter affects the upper bound of size of `CandidateCommitments`.
-    pub hrmp_max_message_num_per_candidate: u32,
-}
-```
+The internal-to-runtime configuration of the parachain host is kept in `struct HostConfiguration`. This is expected to
+be altered only by governance procedures or via migrations from the Polkadot-SDK codebase. The latest definition of
+`HostConfiguration` can be found in the project repo
+[here](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/configuration.rs). Each
+parameter has a doc comment, so for details please refer to the code.
+
+Some related parameters in `HostConfiguration` are grouped together so that they can be managed easily. These are:
+* `async_backing_params` in `struct AsyncBackingParams`
+* `executor_params` in `struct ExecutorParams`
+* `approval_voting_params` in `struct ApprovalVotingParams`
+* `scheduler_params` in `struct SchedulerParams`
+
+Check the definitions of these structs for further details.
+
+### Configuration migrations
+Modifying `HostConfiguration` requires a storage migration. These migrations are located in the
+[`migrations`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/configuration.rs)
+subfolder of Polkadot-SDK repo.
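As a quick illustration of the grouping described above (a minimal sketch, not part of this diff; it assumes a runtime with `T: configuration::Config` and uses only items that appear elsewhere in this patch, such as the `config()` getter and the `scheduler_params` fields), scheduler-related values are now read through the `scheduler_params` sub-struct instead of through flat `HostConfiguration` fields:

```rust
// Sketch: all scheduler/coretime knobs now live under `config.scheduler_params`.
fn scheduler_snapshot<T: configuration::Config>() -> (u32, u32) {
    let config = configuration::Pallet::<T>::config();
    // e.g. the scheduling lookahead and the number of managed cores
    (config.scheduler_params.lookahead, config.scheduler_params.num_cores)
}
```

Governance can also replace the whole group atomically through the new `set_scheduler_params` extrinsic added to `configuration.rs` further down in this patch.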
 ## ParaInherentData
diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml
index 52fbfd9ac77b7d44cdaa79ea0a3f3daa363b50ef..eae5d4fb2ef90a0acb515b863dfb0013c45bac89 100644
--- a/polkadot/runtime/common/Cargo.toml
+++ b/polkadot/runtime/common/Cargo.toml
@@ -16,8 +16,8 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [
 log = { workspace = true }
 rustc-hex = { version = "2.1.0", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", default-features = false, features = ["alloc"] }
-serde_derive = { version = "1.0.117" }
+serde = { features = ["alloc"], workspace = true }
+serde_derive = { workspace = true }
 static_assertions = "1.1.0"
 
 sp-api = { path = "../../../substrate/primitives/api", default-features = false }
@@ -58,8 +58,6 @@ runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parac
 slot-range-helper = { path = "slot_range_helper", default-features = false }
 xcm = { package = "staging-xcm", path = "../../xcm", default-features = false }
 xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false, optional = true }
-
-pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true }
 xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false }
 
 [dev-dependencies]
@@ -69,7 +67,7 @@ pallet-babe = { path = "../../../substrate/frame/babe" }
 pallet-treasury = { path = "../../../substrate/frame/treasury" }
 sp-keystore = { path = "../../../substrate/primitives/keystore" }
 sp-keyring = { path = "../../../substrate/primitives/keyring" }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 libsecp256k1 = "0.7.0"
 test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" }
 
@@ -99,7 +97,6 @@ std = [
     "pallet-transaction-payment/std",
     "pallet-treasury/std",
     "pallet-vesting/std",
-    "pallet-xcm-benchmarks/std",
    "parity-scale-codec/std",
     "primitives/std",
     "runtime-parachains/std",
@@ -137,7 +134,6 @@ runtime-benchmarks = [
     "pallet-timestamp/runtime-benchmarks",
     "pallet-treasury/runtime-benchmarks",
     "pallet-vesting/runtime-benchmarks",
-    "pallet-xcm-benchmarks/runtime-benchmarks",
     "primitives/runtime-benchmarks",
     "runtime-parachains/runtime-benchmarks",
     "sp-runtime/runtime-benchmarks",
diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs
index ba3108c0aa38b9b9a2dabba0559d74ec3c97d0ed..def6bad692a235d3282774b0db3fabdaf33c89b3 100644
--- a/polkadot/runtime/common/src/assigned_slots/migration.rs
+++ b/polkadot/runtime/common/src/assigned_slots/migration.rs
@@ -29,14 +29,14 @@ pub mod v1 {
     impl<T: Config> OnRuntimeUpgrade for VersionUncheckedMigrateToV1<T> {
         #[cfg(feature = "try-runtime")]
         fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
-            let onchain_version = Pallet::<T>::on_chain_storage_version();
-            ensure!(onchain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted");
+            let on_chain_version = Pallet::<T>::on_chain_storage_version();
+            ensure!(on_chain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted");
             Ok(Default::default())
         }
 
         fn on_runtime_upgrade() -> frame_support::weights::Weight {
-            let onchain_version = Pallet::<T>::on_chain_storage_version();
-            if onchain_version < 1 {
+            let on_chain_version = Pallet::<T>::on_chain_storage_version();
+            if on_chain_version < 1 {
                 const MAX_PERMANENT_SLOTS: u32 = 100;
                 const MAX_TEMPORARY_SLOTS: u32 = 100;
 
@@ -52,8 +52,8 @@ pub mod v1 {
 
         #[cfg(feature = "try-runtime")]
         fn post_upgrade(_state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
-            let onchain_version = Pallet::<T>::on_chain_storage_version();
-            ensure!(onchain_version == 1, "assigned_slots::MigrateToV1 needs to be run");
+            let on_chain_version = Pallet::<T>::on_chain_storage_version();
+            ensure!(on_chain_version == 1, "assigned_slots::MigrateToV1 needs to be run");
             assert_eq!(<MaxPermanentSlots<T>>::get(), 100);
             assert_eq!(<MaxTemporarySlots<T>>::get(), 100);
             Ok(())
diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs
index 3419e3497f7d446f7bea02bf8134932c0a0cdbf6..4f032f4dfa3bd76b2e3ced899cec3a0bb5b06ffe 100644
--- a/polkadot/runtime/common/src/assigned_slots/mod.rs
+++ b/polkadot/runtime/common/src/assigned_slots/mod.rs
@@ -107,7 +107,7 @@ type LeasePeriodOf<T> = <<T as Config>::Leaser as Leaser<BlockNumberFor<T>>>::Le
 pub mod pallet {
     use super::*;
 
-    /// The current storage version.
+    /// The in-code storage version.
     const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
 
     #[pallet::pallet]
diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs
index 86550ea8b4ebf97054ce6713fffcdd0dd2cf628e..68f42914e4471531e4c85314c61ca7d41a672c6d 100644
--- a/polkadot/runtime/common/src/claims.rs
+++ b/polkadot/runtime/common/src/claims.rs
@@ -702,7 +702,6 @@ mod tests {
     use secp_utils::*;
 
     use parity_scale_codec::Encode;
-    use sp_core::H256;
     // The testing primitives are very useful for avoiding having to work with signatures
     // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required.
     use crate::claims;
@@ -715,11 +714,8 @@ mod tests {
     };
     use pallet_balances;
     use sp_runtime::{
-        traits::{BlakeTwo256, Identity, IdentityLookup},
-        transaction_validity::TransactionLongevity,
-        BuildStorage,
-        DispatchError::BadOrigin,
-        TokenError,
+        traits::Identity, transaction_validity::TransactionLongevity, BuildStorage,
+        DispatchError::BadOrigin, TokenError,
     };
 
     type Block = frame_system::mocking::MockBlock<Test>;
@@ -734,34 +730,13 @@ mod tests {
         }
     );
 
-    parameter_types! {
-        pub const BlockHashCount: u32 = 250;
-    }
-
     #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
     impl frame_system::Config for Test {
-        type BaseCallFilter = frame_support::traits::Everything;
-        type BlockWeights = ();
-        type BlockLength = ();
-        type DbWeight = ();
         type RuntimeOrigin = RuntimeOrigin;
         type RuntimeCall = RuntimeCall;
-        type Nonce = u64;
-        type Hash = H256;
-        type Hashing = BlakeTwo256;
-        type AccountId = u64;
-        type Lookup = IdentityLookup<u64>;
         type Block = Block;
         type RuntimeEvent = RuntimeEvent;
-        type BlockHashCount = BlockHashCount;
-        type Version = ();
-        type PalletInfo = PalletInfo;
         type AccountData = pallet_balances::AccountData<u64>;
-        type OnNewAccount = ();
-        type OnKilledAccount = ();
-        type SystemWeightInfo = ();
-        type SS58Prefix = ();
-        type OnSetCode = ();
         type MaxConsumers = frame_support::traits::ConstU32<16>;
     }
 
diff --git a/polkadot/runtime/common/src/crowdloan/migration.rs b/polkadot/runtime/common/src/crowdloan/migration.rs
index 5133c14ada9276a56ae91f38e8a67b4846979866..3afd6b3fbc94b5b30bad162cfcf8236a029b77da 100644
--- a/polkadot/runtime/common/src/crowdloan/migration.rs
+++ b/polkadot/runtime/common/src/crowdloan/migration.rs
@@ -24,9 +24,9 @@ use frame_support::{
 pub struct MigrateToTrackInactiveV2<T>(sp_std::marker::PhantomData<T>);
 impl<T: Config> OnRuntimeUpgrade for MigrateToTrackInactiveV2<T> {
     fn on_runtime_upgrade() -> Weight {
-        let onchain_version = Pallet::<T>::on_chain_storage_version();
+        let on_chain_version = Pallet::<T>::on_chain_storage_version();
 
-        if onchain_version == 1 {
+        if on_chain_version == 1 {
             let mut translated = 0u64;
             for item in Funds::<T>::iter_values() {
                 let b =
diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs
index 7d1b892dfa7fc6c4057a7da8731e39a9a4b2d3e8..35d075f2ff6c2f5ee3c1fb4a159eed346d6c6781 100644
--- a/polkadot/runtime/common/src/crowdloan/mod.rs
+++ b/polkadot/runtime/common/src/crowdloan/mod.rs
@@ -180,7 +180,7 @@ pub mod pallet {
     use frame_support::pallet_prelude::*;
     use frame_system::{ensure_root, ensure_signed, pallet_prelude::*};
 
-    /// The current storage version.
+    /// The in-code storage version.
     const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);
 
     #[pallet::pallet]
diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs
index 0dfb03b06ba32b65941165ec993bc694b84a1389..bf334a63e9588411f57ecc8b6ee2fca72861d7aa 100644
--- a/polkadot/runtime/common/src/identity_migrator.rs
+++ b/polkadot/runtime/common/src/identity_migrator.rs
@@ -31,7 +31,7 @@ use pallet_identity;
 use sp_core::Get;
 
 #[cfg(feature = "runtime-benchmarks")]
-use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError};
+use frame_benchmarking::{account, v2::*, BenchmarkError};
 
 pub trait WeightInfo {
     fn reap_identity(r: u32, s: u32) -> Weight;
diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs
index 1ddad37831b68582cfc14f882870922c67058f1d..4ba85a3c8353565ad17cfb5db29c3d4e1dfa8a44 100644
--- a/polkadot/runtime/common/src/impls.rs
+++ b/polkadot/runtime/common/src/impls.rs
@@ -107,7 +107,7 @@ pub fn era_payout(
 )]
 pub enum VersionedLocatableAsset {
     #[codec(index = 3)]
-    V3 { location: xcm::v3::MultiLocation, asset_id: xcm::v3::AssetId },
+    V3 { location: xcm::v3::Location, asset_id: xcm::v3::AssetId },
     #[codec(index = 4)]
     V4 { location: xcm::v4::Location, asset_id: xcm::v4::AssetId },
 }
@@ -140,7 +140,7 @@ impl TryConvert<&VersionedLocation, xcm::latest::Location> for VersionedLocation
     ) -> Result<xcm::latest::Location, &VersionedLocation> {
         let latest = match location.clone() {
             VersionedLocation::V2(l) => {
-                let v3: xcm::v3::MultiLocation = l.try_into().map_err(|_| location)?;
+                let v3: xcm::v3::Location = l.try_into().map_err(|_| location)?;
                 v3.try_into().map_err(|_| location)?
             },
             VersionedLocation::V3(l) => l.try_into().map_err(|_| location)?,
@@ -191,11 +191,11 @@ pub mod benchmarks {
     {
         fn create_asset_kind(seed: u32) -> VersionedLocatableAsset {
             VersionedLocatableAsset::V3 {
-                location: xcm::v3::MultiLocation::new(
+                location: xcm::v3::Location::new(
                     Parents::get(),
                     [xcm::v3::Junction::Parachain(ParaId::get())],
                 ),
-                asset_id: xcm::v3::MultiLocation::new(
+                asset_id: xcm::v3::Location::new(
                     0,
                     [
                         xcm::v3::Junction::PalletInstance(seed.try_into().unwrap()),
diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs
index 7f1100a13619a8c5e1048afd4a34d2d977aa8914..46d09afcfb2c80215ed7ab18eb7be5750de1fcf6 100644
--- a/polkadot/runtime/common/src/xcm_sender.rs
+++ b/polkadot/runtime/common/src/xcm_sender.rs
@@ -136,7 +136,7 @@ where
     }
 }
 
-/// Implementation of `pallet_xcm_benchmarks::EnsureDelivery` which helps to ensure delivery to the
+/// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the
 /// `ParaId` parachain (sibling or child). Deposits existential deposit for origin (if needed).
 /// Deposits estimated fee to the origin account (if needed).
 /// Allows to trigger additional logic for specific `ParaId` (e.g. open HRMP channel) (if neeeded).
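The next hunk makes `ensure_successful_delivery` bail out early unless the destination is the configured `Parachain`. As a rough sketch of that guard (illustrative only, not part of the diff; `is_expected_parachain` is a hypothetical helper, while `Location::first_interior`, the `Parachain` junction, and `ParaId` are used exactly as in this file):

```rust
// Sketch of the destination check added below: proceed only when the
// destination's first interior junction is the expected parachain.
fn is_expected_parachain(dest: &Location, expected: ParaId) -> bool {
    match dest.first_interior() {
        Some(Parachain(para_id)) => ParaId::from(*para_id) == expected,
        _ => false,
    }
}
```

In the actual hunk the same logic is inlined and returns `(None, None)` from `ensure_successful_delivery`, so no fee accounts are set up for foreign destinations.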
@@ -164,7 +164,7 @@ impl<
         PriceForDelivery: PriceForMessageDelivery<Id = ParaId>,
         Parachain: Get<ParaId>,
         ToParachainHelper: EnsureForParachain,
-    > pallet_xcm_benchmarks::EnsureDelivery
+    > xcm_builder::EnsureDelivery
     for ToParachainDeliveryHelper<
         XcmConfig,
         ExistentialDeposit,
@@ -175,7 +175,7 @@ impl<
 {
     fn ensure_successful_delivery(
         origin_ref: &Location,
-        _dest: &Location,
+        dest: &Location,
         fee_reason: xcm_executor::traits::FeeReason,
     ) -> (Option<FeesMode>, Option<Assets>) {
         use xcm_executor::{
@@ -183,6 +183,15 @@ impl<
             FeesMode,
         };
 
+        // check if the destination matches the expected `Parachain`.
+        if let Some(Parachain(para_id)) = dest.first_interior() {
+            if ParaId::from(*para_id) != Parachain::get().into() {
+                return (None, None)
+            }
+        } else {
+            return (None, None)
+        }
+
         let mut fees_mode = None;
         if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) {
             // if not waived, we need to set up accounts for paying and receiving fees
diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml
index 1de34400758fb0e56c45c9ac8eb67d307ffbb0fa..6104014547638dbf13c5b080a280e7cf369293c9 100644
--- a/polkadot/runtime/parachains/Cargo.toml
+++ b/polkadot/runtime/parachains/Cargo.toml
@@ -16,7 +16,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [
 log = { workspace = true }
 rustc-hex = { version = "2.1.0", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] }
+serde = { features = ["alloc", "derive"], workspace = true }
 derive_more = "0.99.17"
 bitflags = "1.3.2"
 
@@ -69,7 +69,8 @@ sp-tracing = { path = "../../../substrate/primitives/tracing" }
 sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" }
 thousands = "0.2.0"
 assert_matches = "1"
-serde_json = "1.0.113"
+rstest = "0.18.2"
+serde_json = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs
index 71c3f1fa39f7c6c127ac954b8299fb16a323e72d..e2ba0b4f7ea5d5ca2eb49c903afb764229e0491f 100644
--- a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs
+++ b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs
@@ -64,11 +64,12 @@ impl GenesisConfigBuilder {
     pub(super) fn build(self) -> MockGenesisConfig {
         let mut genesis = default_genesis_config();
         let config = &mut genesis.configuration.config;
-        config.coretime_cores = self.on_demand_cores;
-        config.on_demand_base_fee = self.on_demand_base_fee;
-        config.on_demand_fee_variability = self.on_demand_fee_variability;
-        config.on_demand_queue_max_size = self.on_demand_max_queue_size;
-        config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization;
+        config.scheduler_params.num_cores = self.on_demand_cores;
+        config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee;
+        config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability;
+        config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size;
+        config.scheduler_params.on_demand_target_queue_utilization =
+            self.on_demand_target_queue_utilization;
 
         let paras = &mut genesis.paras.paras;
         for para_id in self.onboarded_on_demand_chains {
diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs
index 9da81dc816cabeb7019e44b8f88c0f526582830d..e2da89fadd4800f5de32650178766e1ed85bbd17 100644
--- a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs
+++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs
@@ -30,7 +30,7 @@ mod tests;
 
 use crate::{
     assigner_on_demand, configuration,
     paras::AssignCoretime,
-    scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig},
+    scheduler::common::{Assignment, AssignmentProvider},
     ParaId,
 };
 
@@ -316,14 +316,6 @@ impl<T: Config> AssignmentProvider<BlockNumberFor<T>> for Pallet<T> {
         }
     }
 
-    fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig<BlockNumberFor<T>> {
-        let config = <configuration::Pallet<T>>::config();
-        AssignmentProviderConfig {
-            max_availability_timeouts: config.on_demand_retries,
-            ttl: config.on_demand_ttl,
-        }
-    }
-
     #[cfg(any(feature = "runtime-benchmarks", test))]
     fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment {
         // Given that we are not tracking anything in `Bulk` assignments, it is safe to always
@@ -333,7 +325,7 @@ impl<T: Config> AssignmentProvider<BlockNumberFor<T>> for Pallet<T> {
 
     fn session_core_count() -> u32 {
         let config = <configuration::Pallet<T>>::config();
-        config.coretime_cores
+        config.scheduler_params.num_cores
     }
 }
 
@@ -482,8 +474,8 @@ impl<T: Config> AssignCoretime for Pallet<T> {
 
         // Add a new core and assign the para to it.
         let mut config = <configuration::Pallet<T>>::config();
-        let core = config.coretime_cores;
-        config.coretime_cores.saturating_inc();
+        let core = config.scheduler_params.num_cores;
+        config.scheduler_params.num_cores.saturating_inc();
 
         // `assign_coretime` is only called at genesis or by root, so setting the active
         // config here is fine.
diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs
index 5a6060cd2b4eab88867088dee30b6fb1047bdf20..8360e7a78d0a5a155f5b9d63ffae05bb85e44f32 100644
--- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs
+++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs
@@ -43,7 +43,7 @@ where
 {
     ParasShared::<T>::set_session_index(SESSION_INDEX);
     let mut config = HostConfiguration::default();
-    config.coretime_cores = 1;
+    config.scheduler_params.num_cores = 1;
     ConfigurationPallet::<T>::force_set_active_config(config);
     let mut parachains = ParachainsCache::new();
     ParasPallet::<T>::initialize_para_now(
diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs
index de30330ac84e0a7715799d71d26fb42ce48efff8..f8d1a894f0e4d5c619377ba4c34158c8f6d60597 100644
--- a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs
+++ b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs
@@ -63,11 +63,12 @@ impl GenesisConfigBuilder {
     pub(super) fn build(self) -> MockGenesisConfig {
         let mut genesis = default_genesis_config();
         let config = &mut genesis.configuration.config;
-        config.coretime_cores = self.on_demand_cores;
-        config.on_demand_base_fee = self.on_demand_base_fee;
-        config.on_demand_fee_variability = self.on_demand_fee_variability;
-        config.on_demand_queue_max_size = self.on_demand_max_queue_size;
-        config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization;
+        config.scheduler_params.num_cores = self.on_demand_cores;
+        config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee;
+        config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability;
+        config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size;
+        config.scheduler_params.on_demand_target_queue_utilization =
+            self.on_demand_target_queue_utilization;
 
         let paras = &mut genesis.paras.paras;
         for para_id in self.onboarded_on_demand_chains {
diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
index 1b746e88694c9f105db119d351a76e336fd3fdba..bc450dc78129ad7404627e49b9882e2723c09f08 100644
--- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
+++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
@@ -201,10 +201,10 @@ pub mod pallet {
             let old_traffic = SpotTraffic::<T>::get();
             match Self::calculate_spot_traffic(
                 old_traffic,
-                config.on_demand_queue_max_size,
+                config.scheduler_params.on_demand_queue_max_size,
                 Self::queue_size(),
-                config.on_demand_target_queue_utilization,
-                config.on_demand_fee_variability,
+                config.scheduler_params.on_demand_target_queue_utilization,
+                config.scheduler_params.on_demand_fee_variability,
             ) {
                 Ok(new_traffic) => {
                     // Only update storage on change
@@ -330,8 +330,9 @@ where
         let traffic = SpotTraffic::<T>::get();
 
         // Calculate spot price
-        let spot_price: BalanceOf<T> =
-            traffic.saturating_mul_int(config.on_demand_base_fee.saturated_into::<BalanceOf<T>>());
+        let spot_price: BalanceOf<T> = traffic.saturating_mul_int(
+            config.scheduler_params.on_demand_base_fee.saturated_into::<BalanceOf<T>>(),
+        );
 
         // Is the current price higher than `max_amount`
         ensure!(spot_price.le(&max_amount), Error::<T>::SpotPriceHigherThanMaxAmount);
@@ -450,7 +451,10 @@ where
 
         OnDemandQueue::<T>::try_mutate(|queue| {
             // Abort transaction if queue is too large
-            ensure!(Self::queue_size() < config.on_demand_queue_max_size, Error::<T>::QueueFull);
+            ensure!(
+                Self::queue_size() < config.scheduler_params.on_demand_queue_max_size,
+                Error::<T>::QueueFull
+            );
             match location {
                 QueuePushDirection::Back => queue.push_back(order),
                 QueuePushDirection::Front => queue.push_front(order),
@@ -472,7 +476,7 @@ where
                     target: LOG_TARGET,
                     "Failed to fetch the on demand queue size, returning the max size."
                 );
-                return config.on_demand_queue_max_size
+                return config.scheduler_params.on_demand_queue_max_size
             },
         }
     }
diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs
index 34b5d3c1ec51811e8e4c50255592b1e9344014d8..b5f342563e9763e9dba0fa8cbc7afd8e84dc59da 100644
--- a/polkadot/runtime/parachains/src/assigner_parachains.rs
+++ b/polkadot/runtime/parachains/src/assigner_parachains.rs
@@ -27,7 +27,7 @@ use primitives::CoreIndex;
 
 use crate::{
     configuration, paras,
-    scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig},
+    scheduler::common::{Assignment, AssignmentProvider},
 };
 
 pub use pallet::*;
@@ -58,16 +58,6 @@ impl<T: Config> AssignmentProvider<BlockNumberFor<T>> for Pallet<T> {
     /// this is a no-op in the case of a bulk assignment slot.
     fn push_back_assignment(_: Assignment) {}
 
-    fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig<BlockNumberFor<T>> {
-        AssignmentProviderConfig {
-            // The next assignment already goes to the same [`ParaId`], no timeout tracking needed.
-            max_availability_timeouts: 0,
-            // The next assignment already goes to the same [`ParaId`], this can be any number
-            // that's high enough to clear the time it takes to clear backing/availability.
-            ttl: 10u32.into(),
-        }
-    }
-
     #[cfg(any(feature = "runtime-benchmarks", test))]
     fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment {
         Assignment::Bulk(para_id)
diff --git a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs
index e6e9fb074aa97e87e6fe92819cc57e5bfa2ca656..a46e114daeaf458fb6bc985a5bb2ebad951d7e3b 100644
--- a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs
+++ b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs
@@ -60,11 +60,12 @@ impl GenesisConfigBuilder {
     pub(super) fn build(self) -> MockGenesisConfig {
         let mut genesis = default_genesis_config();
         let config = &mut genesis.configuration.config;
-        config.coretime_cores = self.on_demand_cores;
-        config.on_demand_base_fee = self.on_demand_base_fee;
-        config.on_demand_fee_variability = self.on_demand_fee_variability;
-        config.on_demand_queue_max_size = self.on_demand_max_queue_size;
-        config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization;
+        config.scheduler_params.num_cores = self.on_demand_cores;
+        config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee;
+        config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability;
+        config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size;
+        config.scheduler_params.on_demand_target_queue_utilization =
+            self.on_demand_target_queue_utilization;
 
         let paras = &mut genesis.paras.paras;
         for para_id in self.onboarded_on_demand_chains {
diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs
index 016b3fca589a5b845110d9f25199cc8b8aef5bfe..5b218addb1a2552d3f9daa3dbb7baf352023f919 100644
--- a/polkadot/runtime/parachains/src/builder.rs
+++ b/polkadot/runtime/parachains/src/builder.rs
@@ -18,23 +18,20 @@ use crate::{
     configuration, inclusion, initializer, paras,
     paras::ParaKind,
     paras_inherent,
-    scheduler::{
-        self,
-        common::{AssignmentProvider, AssignmentProviderConfig},
-        CoreOccupied, ParasEntry,
-    },
+    scheduler::{self, common::AssignmentProvider, CoreOccupied, ParasEntry},
     session_info, shared,
 };
 use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
 use frame_support::pallet_prelude::*;
 use frame_system::pallet_prelude::*;
 use primitives::{
-    collator_signature_payload, AvailabilityBitfield, BackedCandidate, CandidateCommitments,
-    CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt,
-    CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData,
-    Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, InvalidDisputeStatementKind,
-    PersistedValidationData, SessionIndex, SigningContext, UncheckedSigned,
-    ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, ValidityAttestation,
+    collator_signature_payload, vstaging::node_features::FeatureIndex, AvailabilityBitfield,
+    BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId,
+    CollatorSignature, CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement,
+    DisputeStatementSet, GroupIndex, HeadData, Id as ParaId, IndexedVec,
+    InherentData as ParachainsInherentData, InvalidDisputeStatementKind, PersistedValidationData,
+    SessionIndex, SigningContext, UncheckedSigned, ValidDisputeStatementKind, ValidationCode,
+    ValidatorId, ValidatorIndex, ValidityAttestation,
 };
 use sp_core::{sr25519, H256};
 use sp_runtime::{
@@ -197,7 +194,10 @@ impl<T: Config> BenchBuilder<T> {
     /// Maximum number of validators per core (a.k.a. max validators per group). This value is used
     /// if none is explicitly set on the builder.
     pub(crate) fn fallback_max_validators_per_core() -> u32 {
-        configuration::Pallet::<T>::config().max_validators_per_core.unwrap_or(5)
+        configuration::Pallet::<T>::config()
+            .scheduler_params
+            .max_validators_per_core
+            .unwrap_or(5)
     }
 
     /// Specify a mapping of core index/ para id to the number of dispute statements for the
@@ -510,7 +510,7 @@ impl<T: Config> BenchBuilder<T> {
             .iter()
             .map(|(seed, num_votes)| {
                 assert!(*num_votes <= validators.len() as u32);
-                let (para_id, _core_idx, group_idx) = self.create_indexes(*seed);
+                let (para_id, core_idx, group_idx) = self.create_indexes(*seed);
 
                 // This generates a pair and adds it to the keystore, returning just the public.
                 let collator_public = CollatorId::generate_pair(None);
@@ -587,11 +587,19 @@ impl<T: Config> BenchBuilder<T> {
                     })
                     .collect();
 
-                BackedCandidate::<T::Hash> {
+                // Check if the elastic scaling bit is set, if so we need to supply the core index
+                // in the generated candidate.
+                let core_idx = configuration::Pallet::<T>::config()
+                    .node_features
+                    .get(FeatureIndex::ElasticScalingMVP as usize)
+                    .map(|_the_bit| core_idx);
+
+                BackedCandidate::<T::Hash>::new(
                     candidate,
                     validity_votes,
-                    validator_indices: bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()],
-                }
+                    bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()],
+                    core_idx,
+                )
             })
             .collect()
     }
@@ -683,7 +691,7 @@ impl<T: Config> BenchBuilder<T> {
         // We are currently in Session 0, so these changes will take effect in Session 2.
         Self::setup_para_ids(used_cores);
         configuration::ActiveConfig::<T>::mutate(|c| {
-            c.coretime_cores = used_cores;
+            c.scheduler_params.num_cores = used_cores;
         });
 
         let validator_ids = Self::generate_validator_pairs(self.max_validators());
@@ -714,8 +722,7 @@ impl<T: Config> BenchBuilder<T> {
         let cores = (0..used_cores)
             .into_iter()
             .map(|i| {
-                let AssignmentProviderConfig { ttl, .. } =
-                    scheduler::Pallet::<T>::assignment_provider_config(CoreIndex(i));
+                let ttl = configuration::Pallet::<T>::config().scheduler_params.ttl;
                 // Load an assignment into provider so that one is present to pop
                 let assignment = <T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
                     CoreIndex(i),
@@ -730,8 +737,7 @@ impl<T: Config> BenchBuilder<T> {
         let cores = (0..used_cores)
             .into_iter()
             .map(|i| {
-                let AssignmentProviderConfig { ttl, .. } =
-                    scheduler::Pallet::<T>::assignment_provider_config(CoreIndex(i));
+                let ttl = configuration::Pallet::<T>::config().scheduler_params.ttl;
                 // Load an assignment into provider so that one is present to pop
                 let assignment = <T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs
index 7cc5b31fc8fd1dfbed7a02b293eca2dab4b95cf4..364a15215d38c5eae477b3a73987a43d0216278b 100644
--- a/polkadot/runtime/parachains/src/configuration.rs
+++ b/polkadot/runtime/parachains/src/configuration.rs
@@ -29,7 +29,6 @@ use primitives::{
     vstaging::{ApprovalVotingParams, NodeFeatures},
     AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex,
     LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE,
-    ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE,
 };
 use sp_runtime::{traits::Zero, Perbill};
 use sp_std::prelude::*;
@@ -43,6 +42,7 @@ mod benchmarking;
 pub mod migration;
 
 pub use pallet::*;
+use primitives::vstaging::SchedulerParams;
 
 const LOG_TARGET: &str = "runtime::configuration";
 
@@ -118,9 +118,9 @@ pub struct HostConfiguration<BlockNumber> {
     /// been completed.
     ///
     /// Note, there are situations in which `expected_at` in the past. For example, if
-    /// [`paras_availability_period`](Self::paras_availability_period) is less than the delay set
-    /// by this field or if PVF pre-check took more time than the delay. In such cases, the upgrade
-    /// is further at the earliest possible time determined by
+    /// [`paras_availability_period`](SchedulerParams::paras_availability_period) is less than the
+    /// delay set by this field or if PVF pre-check took more time than the delay. In such cases,
+    /// the upgrade is further at the earliest possible time determined by
     /// [`minimum_validation_upgrade_delay`](Self::minimum_validation_upgrade_delay).
     ///
     /// The rationale for this delay has to do with relay-chain reversions. In case there is an
@@ -172,48 +172,7 @@ pub struct HostConfiguration<BlockNumber> {
     /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes
     /// have concluded.
     pub code_retention_period: BlockNumber,
-    /// How many cores are managed by the coretime chain.
-    pub coretime_cores: u32,
-    /// The number of retries that a on demand author has to submit their block.
-    pub on_demand_retries: u32,
-    /// The maximum queue size of the pay as you go module.
-    pub on_demand_queue_max_size: u32,
-    /// The target utilization of the spot price queue in percentages.
-    pub on_demand_target_queue_utilization: Perbill,
-    /// How quickly the fee rises in reaction to increased utilization.
-    /// The lower the number the slower the increase.
-    pub on_demand_fee_variability: Perbill,
-    /// The minimum amount needed to claim a slot in the spot pricing queue.
-    pub on_demand_base_fee: Balance,
-    /// The number of blocks an on demand claim stays in the scheduler's claimqueue before getting
-    /// cleared. This number should go reasonably higher than the number of blocks in the async
-    /// backing lookahead.
-    pub on_demand_ttl: BlockNumber,
-    /// How often parachain groups should be rotated across parachains.
-    ///
-    /// Must be non-zero.
-    pub group_rotation_frequency: BlockNumber,
-    /// The minimum availability period, in blocks.
-    ///
-    /// This is the minimum amount of blocks after a core became occupied that validators have time
-    /// to make the block available.
-    ///
-    /// This value only has effect on group rotations. If backers backed something at the end of
-    /// their rotation, the occupied core affects the backing group that comes afterwards. We limit
-    /// the effect one backing group can have on the next to `paras_availability_period` blocks.
-    ///
-    /// Within a group rotation there is no timeout as backers are only affecting themselves.
-    ///
-    /// Must be at least 1. With a value of 1, the previous group will not be able to negatively
-    /// affect the following group at the expense of a tight availability timeline at group
-    /// rotation boundaries.
-    pub paras_availability_period: BlockNumber,
-    /// The amount of blocks ahead to schedule paras.
-    pub scheduling_lookahead: u32,
-    /// The maximum number of validators to have per core.
-    ///
-    /// `None` means no maximum.
-    pub max_validators_per_core: Option<u32>,
+    /// The maximum number of validators to use for parachain consensus, period.
     ///
     /// `None` means no maximum.
@@ -257,7 +216,7 @@ pub struct HostConfiguration<BlockNumber> {
     /// scheduled. This number is controlled by this field.
     ///
     /// This value should be greater than
-    /// [`paras_availability_period`](Self::paras_availability_period).
+    /// [`paras_availability_period`](SchedulerParams::paras_availability_period).
     pub minimum_validation_upgrade_delay: BlockNumber,
     /// The minimum number of valid backing statements required to consider a parachain candidate
     /// backable.
@@ -266,6 +225,8 @@ pub struct HostConfiguration<BlockNumber> {
     pub node_features: NodeFeatures,
     /// Params used by approval-voting
     pub approval_voting_params: ApprovalVotingParams,
+    /// Scheduler parameters
+    pub scheduler_params: SchedulerParams<BlockNumber>,
 }
 
 impl<BlockNumber: Default + From<u32>> Default for HostConfiguration<BlockNumber> {
@@ -275,8 +236,6 @@ impl> Default for HostConfiguration> Default for HostConfiguration> Default for HostConfiguration {
     ZeroMinimumBackingVotes,
     /// `executor_params` are inconsistent.
     InconsistentExecutorParams { inner: ExecutorParamError },
+    /// TTL should be bigger than lookahead
+    LookaheadExceedsTTL,
 }
 
 impl<BlockNumber> HostConfiguration<BlockNumber>
@@ -373,11 +326,11 @@ where
     pub fn check_consistency(&self) -> Result<(), InconsistentError<BlockNumber>> {
         use InconsistentError::*;
 
-        if self.group_rotation_frequency.is_zero() {
+        if self.scheduler_params.group_rotation_frequency.is_zero() {
             return Err(ZeroGroupRotationFrequency)
         }
 
-        if self.paras_availability_period.is_zero() {
+        if self.scheduler_params.paras_availability_period.is_zero() {
             return Err(ZeroParasAvailabilityPeriod)
         }
 
@@ -399,10 +352,11 @@ where
             return Err(MaxPovSizeExceedHardLimit { max_pov_size: self.max_pov_size })
         }
 
-        if self.minimum_validation_upgrade_delay <= self.paras_availability_period {
+        if self.minimum_validation_upgrade_delay <= self.scheduler_params.paras_availability_period
+        {
             return Err(MinimumValidationUpgradeDelayLessThanChainAvailabilityPeriod {
                 minimum_validation_upgrade_delay: self.minimum_validation_upgrade_delay.clone(),
-                paras_availability_period: self.paras_availability_period.clone(),
+                paras_availability_period: self.scheduler_params.paras_availability_period.clone(),
             })
         }
 
@@ -447,6 +401,10 @@ where
             return Err(InconsistentExecutorParams { inner })
         }
 
+        if self.scheduler_params.ttl < self.scheduler_params.lookahead.into() {
+            return Err(LookaheadExceedsTTL)
+        }
+
         Ok(())
     }
 
@@ -471,6 +429,7 @@ pub trait WeightInfo {
     fn set_config_with_executor_params() -> Weight;
     fn set_config_with_perbill() -> Weight;
     fn set_node_feature() -> Weight;
+    fn set_config_with_scheduler_params() -> Weight;
 }
 
 pub struct TestWeightInfo;
@@ -499,13 +458,16 @@ impl WeightInfo for TestWeightInfo {
     fn set_node_feature() -> Weight {
         Weight::MAX
     }
+    fn set_config_with_scheduler_params() -> Weight {
+        Weight::MAX
+    }
 }
 
 #[frame_support::pallet]
 pub mod pallet {
     use super::*;
 
-    /// The current storage version.
+    /// The in-code storage version.
     ///
     /// v0-v1:
     /// v1-v2:
@@ -520,7 +482,8 @@ pub mod pallet {
     /// v8-v9:
     /// v9-v10:
     /// v10-11:
-    const STORAGE_VERSION: StorageVersion = StorageVersion::new(11);
+    /// v11-12:
+    const STORAGE_VERSION: StorageVersion = StorageVersion::new(12);
 
     #[pallet::pallet]
     #[pallet::storage_version(STORAGE_VERSION)]
@@ -679,16 +642,16 @@ pub mod pallet {
             Self::set_coretime_cores_unchecked(new)
         }
 
-        /// Set the number of retries for a particular on demand.
+        /// Set the max number of times a claim may timeout on a core before it is abandoned
         #[pallet::call_index(7)]
         #[pallet::weight((
             T::WeightInfo::set_config_with_u32(),
             DispatchClass::Operational,
         ))]
-        pub fn set_on_demand_retries(origin: OriginFor<T>, new: u32) -> DispatchResult {
+        pub fn set_max_availability_timeouts(origin: OriginFor<T>, new: u32) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.on_demand_retries = new;
+                config.scheduler_params.max_availability_timeouts = new;
             })
         }
 
@@ -704,7 +667,7 @@ pub mod pallet {
         ) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.group_rotation_frequency = new;
+                config.scheduler_params.group_rotation_frequency = new;
             })
         }
 
@@ -720,7 +683,7 @@ pub mod pallet {
         ) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.paras_availability_period = new;
+                config.scheduler_params.paras_availability_period = new;
             })
         }
 
@@ -733,7 +696,7 @@ pub mod pallet {
         pub fn set_scheduling_lookahead(origin: OriginFor<T>, new: u32) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.scheduling_lookahead = new;
+                config.scheduler_params.lookahead = new;
             })
         }
 
@@ -749,7 +712,7 @@ pub mod pallet {
         ) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.max_validators_per_core = new;
+                config.scheduler_params.max_validators_per_core = new;
             })
         }
 
@@ -1141,7 +1104,7 @@ pub mod pallet {
         pub fn set_on_demand_base_fee(origin: OriginFor<T>, new: Balance) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.on_demand_base_fee = new;
+                config.scheduler_params.on_demand_base_fee = new;
             })
         }
 
@@ -1154,7 +1117,7 @@ pub mod pallet {
         pub fn set_on_demand_fee_variability(origin: OriginFor<T>, new: Perbill) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.on_demand_fee_variability = new;
+                config.scheduler_params.on_demand_fee_variability = new;
             })
         }
 
@@ -1167,7 +1130,7 @@ pub mod pallet {
         pub fn set_on_demand_queue_max_size(origin: OriginFor<T>, new: u32) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.on_demand_queue_max_size = new;
+                config.scheduler_params.on_demand_queue_max_size = new;
             })
         }
         /// Set the on demand (parathreads) fee variability.
@@ -1182,7 +1145,7 @@ pub mod pallet {
         ) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.on_demand_target_queue_utilization = new;
+                config.scheduler_params.on_demand_target_queue_utilization = new;
             })
         }
         /// Set the on demand (parathreads) ttl in the claimqueue.
@@ -1194,7 +1157,7 @@ pub mod pallet {
         pub fn set_on_demand_ttl(origin: OriginFor<T>, new: BlockNumberFor<T>) -> DispatchResult {
             ensure_root(origin)?;
             Self::schedule_config_update(|config| {
-                config.on_demand_ttl = new;
+                config.scheduler_params.ttl = new;
             })
         }
 
@@ -1244,6 +1207,22 @@ pub mod pallet {
                 config.approval_voting_params = new;
             })
         }
+
+        /// Set scheduler-params.
+        #[pallet::call_index(55)]
+        #[pallet::weight((
+            T::WeightInfo::set_config_with_scheduler_params(),
+            DispatchClass::Operational,
+        ))]
+        pub fn set_scheduler_params(
+            origin: OriginFor<T>,
+            new: SchedulerParams<BlockNumberFor<T>>,
+        ) -> DispatchResult {
+            ensure_root(origin)?;
+            Self::schedule_config_update(|config| {
+                config.scheduler_params = new;
+            })
+        }
     }
 
     impl<T: Config> Pallet<T> {
@@ -1252,7 +1231,7 @@ pub mod pallet {
         /// To be used if authorization is checked otherwise.
pub fn set_coretime_cores_unchecked(new: u32) -> DispatchResult { Self::schedule_config_update(|config| { - config.coretime_cores = new; + config.scheduler_params.num_cores = new; }) } } @@ -1393,7 +1372,7 @@ impl Pallet { let base_config_consistent = base_config.check_consistency().is_ok(); // Now, we need to decide what the new configuration should be. - // We also move the `base_config` to `new_config` to empahsize that the base config was + // We also move the `base_config` to `new_config` to emphasize that the base config was // destroyed by the `updater`. updater(&mut base_config); let new_config = base_config; diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index 67daf1c459884d42378056b6528605da29578c95..882b5aab096ad2f8227aa7ad0efaddfd2078c3ad 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -51,6 +51,8 @@ benchmarks! { set_node_feature{}: set_node_feature(RawOrigin::Root, 255, true) + set_config_with_scheduler_params {} : set_scheduler_params(RawOrigin::Root, SchedulerParams::default()) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/polkadot/runtime/parachains/src/configuration/migration.rs b/polkadot/runtime/parachains/src/configuration/migration.rs index 2838b73092dbab4a029a684948a85123aa489906..87b30b177e735bdc1ec618a6e403c4c74a801533 100644 --- a/polkadot/runtime/parachains/src/configuration/migration.rs +++ b/polkadot/runtime/parachains/src/configuration/migration.rs @@ -18,6 +18,7 @@ pub mod v10; pub mod v11; +pub mod v12; pub mod v6; pub mod v7; pub mod v8; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index a69061ac1381e253cdbaef6d4681511973f83e86..f6e0e0431640ba271f592759308cc0d07efcfa71 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -21,13 +21,122 @@ use frame_support::{ migrations::VersionedMigration, pallet_prelude::*, traits::Defensive, weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{vstaging::ApprovalVotingParams, SessionIndex}; +use primitives::{ + vstaging::ApprovalVotingParams, AsyncBackingParams, ExecutorParams, SessionIndex, + LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; +use polkadot_core_primitives::Balance; +use primitives::vstaging::NodeFeatures; +use sp_arithmetic::Perbill; use super::v10::V10HostConfiguration; -type V11HostConfiguration = configuration::HostConfiguration; + +#[derive(Clone, Encode, PartialEq, Decode, Debug)] +pub struct V11HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: 
+	pub hrmp_max_parachain_inbound_channels: u32,
+	pub hrmp_channel_max_message_size: u32,
+	pub executor_params: ExecutorParams,
+	pub code_retention_period: BlockNumber,
+	pub coretime_cores: u32,
+	pub on_demand_retries: u32,
+	pub on_demand_queue_max_size: u32,
+	pub on_demand_target_queue_utilization: Perbill,
+	pub on_demand_fee_variability: Perbill,
+	pub on_demand_base_fee: Balance,
+	pub on_demand_ttl: BlockNumber,
+	pub group_rotation_frequency: BlockNumber,
+	pub paras_availability_period: BlockNumber,
+	pub scheduling_lookahead: u32,
+	pub max_validators_per_core: Option<u32>,
+	pub max_validators: Option<u32>,
+	pub dispute_period: SessionIndex,
+	pub dispute_post_conclusion_acceptance_period: BlockNumber,
+	pub no_show_slots: u32,
+	pub n_delay_tranches: u32,
+	pub zeroth_delay_tranche_width: u32,
+	pub needed_approvals: u32,
+	pub relay_vrf_modulo_samples: u32,
+	pub pvf_voting_ttl: SessionIndex,
+	pub minimum_validation_upgrade_delay: BlockNumber,
+	pub minimum_backing_votes: u32,
+	pub node_features: NodeFeatures,
+	pub approval_voting_params: ApprovalVotingParams,
+}
+
+impl<BlockNumber: Default + From<u32>> Default for V11HostConfiguration<BlockNumber> {
+	fn default() -> Self {
+		Self {
+			async_backing_params: AsyncBackingParams {
+				max_candidate_depth: 0,
+				allowed_ancestry_len: 0,
+			},
+			group_rotation_frequency: 1u32.into(),
+			paras_availability_period: 1u32.into(),
+			no_show_slots: 1u32.into(),
+			validation_upgrade_cooldown: Default::default(),
+			validation_upgrade_delay: 2u32.into(),
+			code_retention_period: Default::default(),
+			max_code_size: Default::default(),
+			max_pov_size: Default::default(),
+			max_head_data_size: Default::default(),
+			coretime_cores: Default::default(),
+			on_demand_retries: Default::default(),
+			scheduling_lookahead: 1,
+			max_validators_per_core: Default::default(),
+			max_validators: None,
+			dispute_period: 6,
+			dispute_post_conclusion_acceptance_period: 100.into(),
+			n_delay_tranches: Default::default(),
+			zeroth_delay_tranche_width: Default::default(),
+			needed_approvals: Default::default(),
+			relay_vrf_modulo_samples: Default::default(),
+			max_upward_queue_count: Default::default(),
+			max_upward_queue_size: Default::default(),
+			max_downward_message_size: Default::default(),
+			max_upward_message_size: Default::default(),
+			max_upward_message_num_per_candidate: Default::default(),
+			hrmp_sender_deposit: Default::default(),
+			hrmp_recipient_deposit: Default::default(),
+			hrmp_channel_max_capacity: Default::default(),
+			hrmp_channel_max_total_size: Default::default(),
+			hrmp_max_parachain_inbound_channels: Default::default(),
+			hrmp_channel_max_message_size: Default::default(),
+			hrmp_max_parachain_outbound_channels: Default::default(),
+			hrmp_max_message_num_per_candidate: Default::default(),
+			pvf_voting_ttl: 2u32.into(),
+			minimum_validation_upgrade_delay: 2.into(),
+			executor_params: Default::default(),
+			approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 },
+			on_demand_queue_max_size: ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE,
+			on_demand_base_fee: 10_000_000u128,
+			on_demand_fee_variability: Perbill::from_percent(3),
+			on_demand_target_queue_utilization: Perbill::from_percent(25),
+			on_demand_ttl: 5u32.into(),
+			minimum_backing_votes: LEGACY_MIN_BACKING_VOTES,
+			node_features: NodeFeatures::EMPTY,
+		}
+	}
+}
 
 mod v10 {
 	use super::*;
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4295a79893e8ee52632d54772bd108b4de37cc3f
--- /dev/null
+++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs
@@ -0,0 +1,349 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! A module that is responsible for migration of storage.
+
+use crate::configuration::{self, migration::v11::V11HostConfiguration, Config, Pallet};
+use frame_support::{
+	migrations::VersionedMigration,
+	pallet_prelude::*,
+	traits::{Defensive, OnRuntimeUpgrade},
+};
+use frame_system::pallet_prelude::BlockNumberFor;
+use primitives::vstaging::SchedulerParams;
+use sp_core::Get;
+use sp_staking::SessionIndex;
+use sp_std::vec::Vec;
+
+type V12HostConfiguration<BlockNumber> = configuration::HostConfiguration<BlockNumber>;
+
+mod v11 {
+	use super::*;
+
+	#[frame_support::storage_alias]
+	pub(crate) type ActiveConfig<T: Config> =
+		StorageValue<Pallet<T>, V11HostConfiguration<BlockNumberFor<T>>, OptionQuery>;
+
+	#[frame_support::storage_alias]
+	pub(crate) type PendingConfigs<T: Config> = StorageValue<
+		Pallet<T>,
+		Vec<(SessionIndex, V11HostConfiguration<BlockNumberFor<T>>)>,
+		OptionQuery,
+	>;
+}
+
+mod v12 {
+	use super::*;
+
+	#[frame_support::storage_alias]
+	pub(crate) type ActiveConfig<T: Config> =
+		StorageValue<Pallet<T>, V12HostConfiguration<BlockNumberFor<T>>, OptionQuery>;
+
+	#[frame_support::storage_alias]
+	pub(crate) type PendingConfigs<T: Config> = StorageValue<
+		Pallet<T>,
+		Vec<(SessionIndex, V12HostConfiguration<BlockNumberFor<T>>)>,
+		OptionQuery,
+	>;
+}
+
+pub type MigrateToV12<T> = VersionedMigration<
+	11,
+	12,
+	UncheckedMigrateToV12<T>,
+	Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
+
+pub struct UncheckedMigrateToV12<T>(sp_std::marker::PhantomData<T>);
+
+impl<T: Config> OnRuntimeUpgrade for UncheckedMigrateToV12<T> {
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+		log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV12");
+		Ok(Vec::new())
+	}
+
+	fn on_runtime_upgrade() -> Weight {
+		log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV12 started");
+		let weight_consumed = migrate_to_v12::<T>();
+
+		log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV12 executed successfully");
+
+		weight_consumed
+	}
+
+	#[cfg(feature = "try-runtime")]
+	fn post_upgrade(_state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
+		log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV12");
+		ensure!(
+			StorageVersion::get::<Pallet<T>>() >= 12,
+			"Storage version should be >= 12 after the migration"
+		);
+
+		Ok(())
+	}
+}
+
+fn migrate_to_v12<T: Config>() -> Weight {
+	// Unusual formatting is justified:
+	// - make it easier to verify that fields assign what they are supposed to assign.
+	// - this code is transient and will be removed after all migrations are done.
+	// - this code is important enough to optimize for legibility sacrificing consistency.
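+	//
+	// The translation itself is mechanical: every v11 field is carried over unchanged, and the
+	// scheduling-related fields are regrouped into the new nested `SchedulerParams` struct
+	// (`coretime_cores` -> `num_cores`, `on_demand_retries` -> `max_availability_timeouts`,
+	// `scheduling_lookahead` -> `lookahead`, `on_demand_ttl` -> `ttl`).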
+	#[rustfmt::skip]
+	let translate =
+		|pre: V11HostConfiguration<BlockNumberFor<T>>| ->
+		V12HostConfiguration<BlockNumberFor<T>>
+	{
+		V12HostConfiguration {
+			max_code_size                            : pre.max_code_size,
+			max_head_data_size                       : pre.max_head_data_size,
+			max_upward_queue_count                   : pre.max_upward_queue_count,
+			max_upward_queue_size                    : pre.max_upward_queue_size,
+			max_upward_message_size                  : pre.max_upward_message_size,
+			max_upward_message_num_per_candidate     : pre.max_upward_message_num_per_candidate,
+			hrmp_max_message_num_per_candidate       : pre.hrmp_max_message_num_per_candidate,
+			validation_upgrade_cooldown              : pre.validation_upgrade_cooldown,
+			validation_upgrade_delay                 : pre.validation_upgrade_delay,
+			max_pov_size                             : pre.max_pov_size,
+			max_downward_message_size                : pre.max_downward_message_size,
+			hrmp_sender_deposit                      : pre.hrmp_sender_deposit,
+			hrmp_recipient_deposit                   : pre.hrmp_recipient_deposit,
+			hrmp_channel_max_capacity                : pre.hrmp_channel_max_capacity,
+			hrmp_channel_max_total_size              : pre.hrmp_channel_max_total_size,
+			hrmp_max_parachain_inbound_channels      : pre.hrmp_max_parachain_inbound_channels,
+			hrmp_max_parachain_outbound_channels     : pre.hrmp_max_parachain_outbound_channels,
+			hrmp_channel_max_message_size            : pre.hrmp_channel_max_message_size,
+			code_retention_period                    : pre.code_retention_period,
+			max_validators                           : pre.max_validators,
+			dispute_period                           : pre.dispute_period,
+			dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period,
+			no_show_slots                            : pre.no_show_slots,
+			n_delay_tranches                         : pre.n_delay_tranches,
+			zeroth_delay_tranche_width               : pre.zeroth_delay_tranche_width,
+			needed_approvals                         : pre.needed_approvals,
+			relay_vrf_modulo_samples                 : pre.relay_vrf_modulo_samples,
+			pvf_voting_ttl                           : pre.pvf_voting_ttl,
+			minimum_validation_upgrade_delay         : pre.minimum_validation_upgrade_delay,
+			async_backing_params                     : pre.async_backing_params,
+			executor_params                          : pre.executor_params,
+			minimum_backing_votes                    : pre.minimum_backing_votes,
+			node_features                            : pre.node_features,
+			approval_voting_params                   : pre.approval_voting_params,
+			scheduler_params: SchedulerParams {
+				group_rotation_frequency             : pre.group_rotation_frequency,
+				paras_availability_period            : pre.paras_availability_period,
+				max_validators_per_core              : pre.max_validators_per_core,
+				lookahead                            : pre.scheduling_lookahead,
+				num_cores                            : pre.coretime_cores,
+				max_availability_timeouts            : pre.on_demand_retries,
+				on_demand_queue_max_size             : pre.on_demand_queue_max_size,
+				on_demand_target_queue_utilization   : pre.on_demand_target_queue_utilization,
+				on_demand_fee_variability            : pre.on_demand_fee_variability,
+				on_demand_base_fee                   : pre.on_demand_base_fee,
+				ttl                                  : pre.on_demand_ttl,
+			}
+		}
+	};
+
+	let v11 = v11::ActiveConfig::<T>::get()
+		.defensive_proof("Could not decode old config")
+		.unwrap_or_default();
+	let v12 = translate(v11);
+	v12::ActiveConfig::<T>::set(Some(v12));
+
+	// Allowed to be empty.
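+	// Pending configs scheduled for future sessions are translated with the same closure, so
+	// they keep decoding under the new layout after the upgrade; their session indices are left
+	// untouched.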
+	let pending_v11 = v11::PendingConfigs::<T>::get().unwrap_or_default();
+	let mut pending_v12 = Vec::new();
+
+	for (session, v11) in pending_v11.into_iter() {
+		let v12 = translate(v11);
+		pending_v12.push((session, v12));
+	}
+	v12::PendingConfigs::<T>::set(Some(pending_v12.clone()));
+
+	let num_configs = (pending_v12.len() + 1) as u64;
+	T::DbWeight::get().reads_writes(num_configs, num_configs)
+}
+
+#[cfg(test)]
+mod tests {
+	use primitives::LEGACY_MIN_BACKING_VOTES;
+	use sp_arithmetic::Perbill;
+
+	use super::*;
+	use crate::mock::{new_test_ext, Test};
+
+	#[test]
+	fn v12_deserialized_from_actual_data() {
+		// Example of how to get a new `raw_config`:
+		// We'll obtain the raw_config at a specified block.
+		// Steps:
+		// 1. Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate
+		// 2. Set these parameters:
+		//   2.1. selected state query: configuration; activeConfig():
+		//        PolkadotRuntimeParachainsConfigurationHostConfiguration
+		//   2.2. blockhash to query at:
+		//        0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of
+		//        the block)
+		//   2.3. Note the value of encoded storage key ->
+		//        0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the
+		//        referenced block.
+		//   2.4. You'll also need the decoded values to update the test.
+		// 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage
+		//   3.1 Enter the encoded storage key and you get the raw config.
+
+		// This exceeds the maximal line width, but that's fine: this is not code that needs to
+		// be read, and keeping it as a single line makes it easy to copy.
+		let raw_config =
+			hex_literal::hex![
+				"0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000060000006400000002000000190000000000000002000000020000000200000005000000020000000001000000140000000400000001010000000100000001000000000000001027000080b2e60e80c3c9018096980000000000000000000000000005000000"
+			];
+
+		let v12 =
+			V12HostConfiguration::<primitives::BlockNumber>::decode(&mut &raw_config[..]).unwrap();
+
+		// We check only a sample of the values here. If we missed any fields or messed up data
+		// types that would skew all the fields coming after.
+		assert_eq!(v12.max_code_size, 3_145_728);
+		assert_eq!(v12.validation_upgrade_cooldown, 2);
+		assert_eq!(v12.max_pov_size, 5_242_880);
+		assert_eq!(v12.hrmp_channel_max_message_size, 1_048_576);
+		assert_eq!(v12.n_delay_tranches, 25);
+		assert_eq!(v12.minimum_validation_upgrade_delay, 5);
+		assert_eq!(v12.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES);
+		assert_eq!(v12.approval_voting_params.max_approval_coalesce_count, 1);
+		assert_eq!(v12.scheduler_params.group_rotation_frequency, 20);
+		assert_eq!(v12.scheduler_params.paras_availability_period, 4);
+		assert_eq!(v12.scheduler_params.lookahead, 1);
+		assert_eq!(v12.scheduler_params.num_cores, 1);
+		assert_eq!(v12.scheduler_params.max_availability_timeouts, 0);
+		assert_eq!(v12.scheduler_params.on_demand_queue_max_size, 10_000);
+		assert_eq!(
+			v12.scheduler_params.on_demand_target_queue_utilization,
+			Perbill::from_percent(25)
+		);
+		assert_eq!(v12.scheduler_params.on_demand_fee_variability, Perbill::from_percent(3));
+		assert_eq!(v12.scheduler_params.on_demand_base_fee, 10_000_000);
+		assert_eq!(v12.scheduler_params.ttl, 5);
+	}
+
+	#[test]
+	fn test_migrate_to_v12() {
+		// Host configuration has lots of fields. However, in this migration we only add one
+		// field. The most important parts to check are the last few fields. We also pick
+		// extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle)
+		// and also their type.
+		//
+		// We specify only the picked fields and the rest should be provided by the `Default`
+		// implementation. That implementation is copied over between the two types and should work
+		// fine.
+		let v11 = V11HostConfiguration::<primitives::BlockNumber> {
+			needed_approvals: 69,
+			paras_availability_period: 55,
+			hrmp_recipient_deposit: 1337,
+			max_pov_size: 1111,
+			minimum_validation_upgrade_delay: 20,
+			on_demand_ttl: 3,
+			on_demand_retries: 10,
+			..Default::default()
+		};
+
+		let mut pending_configs = Vec::new();
+		pending_configs.push((100, v11.clone()));
+		pending_configs.push((300, v11.clone()));
+
+		new_test_ext(Default::default()).execute_with(|| {
+			// Implant the v11 version in the state.
+			v11::ActiveConfig::<Test>::set(Some(v11.clone()));
+			v11::PendingConfigs::<Test>::set(Some(pending_configs));
+
+			migrate_to_v12::<Test>();
+
+			let v12 = v12::ActiveConfig::<Test>::get().unwrap();
+			assert_eq!(v12.approval_voting_params.max_approval_coalesce_count, 1);
+
+			let mut configs_to_check = v12::PendingConfigs::<Test>::get().unwrap();
+			configs_to_check.push((0, v12.clone()));
+
+			for (_, v12) in configs_to_check {
+				#[rustfmt::skip]
+				{
+					assert_eq!(v11.max_code_size                            , v12.max_code_size);
+					assert_eq!(v11.max_head_data_size                       , v12.max_head_data_size);
+					assert_eq!(v11.max_upward_queue_count                   , v12.max_upward_queue_count);
+					assert_eq!(v11.max_upward_queue_size                    , v12.max_upward_queue_size);
+					assert_eq!(v11.max_upward_message_size                  , v12.max_upward_message_size);
+					assert_eq!(v11.max_upward_message_num_per_candidate     , v12.max_upward_message_num_per_candidate);
+					assert_eq!(v11.hrmp_max_message_num_per_candidate       , v12.hrmp_max_message_num_per_candidate);
+					assert_eq!(v11.validation_upgrade_cooldown              , v12.validation_upgrade_cooldown);
+					assert_eq!(v11.validation_upgrade_delay                 , v12.validation_upgrade_delay);
+					assert_eq!(v11.max_pov_size                             , v12.max_pov_size);
+					assert_eq!(v11.max_downward_message_size                , v12.max_downward_message_size);
+					assert_eq!(v11.hrmp_max_parachain_outbound_channels     , v12.hrmp_max_parachain_outbound_channels);
+					assert_eq!(v11.hrmp_sender_deposit                      , v12.hrmp_sender_deposit);
+					assert_eq!(v11.hrmp_recipient_deposit                   , v12.hrmp_recipient_deposit);
+					assert_eq!(v11.hrmp_channel_max_capacity                , v12.hrmp_channel_max_capacity);
+					assert_eq!(v11.hrmp_channel_max_total_size              , v12.hrmp_channel_max_total_size);
+					assert_eq!(v11.hrmp_max_parachain_inbound_channels      , v12.hrmp_max_parachain_inbound_channels);
+					assert_eq!(v11.hrmp_channel_max_message_size            , v12.hrmp_channel_max_message_size);
+					assert_eq!(v11.code_retention_period                    , v12.code_retention_period);
+					assert_eq!(v11.max_validators                           , v12.max_validators);
+					assert_eq!(v11.dispute_period                           , v12.dispute_period);
+					assert_eq!(v11.no_show_slots                            , v12.no_show_slots);
+					assert_eq!(v11.n_delay_tranches                         , v12.n_delay_tranches);
+					assert_eq!(v11.zeroth_delay_tranche_width               , v12.zeroth_delay_tranche_width);
+					assert_eq!(v11.needed_approvals                         , v12.needed_approvals);
+					assert_eq!(v11.relay_vrf_modulo_samples                 , v12.relay_vrf_modulo_samples);
+					assert_eq!(v11.pvf_voting_ttl                           , v12.pvf_voting_ttl);
+					assert_eq!(v11.minimum_validation_upgrade_delay         , v12.minimum_validation_upgrade_delay);
+					assert_eq!(v11.async_backing_params.allowed_ancestry_len, v12.async_backing_params.allowed_ancestry_len);
+					assert_eq!(v11.async_backing_params.max_candidate_depth , v12.async_backing_params.max_candidate_depth);
+					assert_eq!(v11.executor_params                          , v12.executor_params);
+					assert_eq!(v11.minimum_backing_votes                    , v12.minimum_backing_votes);
+					assert_eq!(v11.group_rotation_frequency                 , v12.scheduler_params.group_rotation_frequency);
+					assert_eq!(v11.paras_availability_period                , v12.scheduler_params.paras_availability_period);
+					assert_eq!(v11.max_validators_per_core                  , v12.scheduler_params.max_validators_per_core);
+					assert_eq!(v11.scheduling_lookahead                     , v12.scheduler_params.lookahead);
+					assert_eq!(v11.coretime_cores                           , v12.scheduler_params.num_cores);
+					assert_eq!(v11.on_demand_retries                        , v12.scheduler_params.max_availability_timeouts);
+					assert_eq!(v11.on_demand_queue_max_size                 , v12.scheduler_params.on_demand_queue_max_size);
+					assert_eq!(v11.on_demand_target_queue_utilization       , v12.scheduler_params.on_demand_target_queue_utilization);
+					assert_eq!(v11.on_demand_fee_variability                , v12.scheduler_params.on_demand_fee_variability);
+					assert_eq!(v11.on_demand_base_fee                       , v12.scheduler_params.on_demand_base_fee);
+					assert_eq!(v11.on_demand_ttl                            , v12.scheduler_params.ttl);
+				}; // ; makes this a statement. `rustfmt::skip` cannot be put on an expression.
+			}
+		});
+	}
+
+	// Test that migration doesn't panic in case there are no pending configurations upgrades in
+	// pallet's storage.
+	#[test]
+	fn test_migrate_to_v12_no_pending() {
+		let v11 = V11HostConfiguration::<primitives::BlockNumber>::default();
+
+		new_test_ext(Default::default()).execute_with(|| {
+			// Implant the v11 version in the state.
+			v11::ActiveConfig::<Test>::set(Some(v11));
+			// Ensure there are no pending configs.
+			v12::PendingConfigs::<Test>::set(None);
+
+			// Shouldn't fail.
+			migrate_to_v12::<Test>();
+		});
+	}
+}
diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs
index f1570017dd7ba75db429eac4f00f26fa277de2d6..254511231cac1729981706775bb9f844ca9f1c5c 100644
--- a/polkadot/runtime/parachains/src/configuration/tests.rs
+++ b/polkadot/runtime/parachains/src/configuration/tests.rs
@@ -226,8 +226,11 @@ fn invariants() {
 	);
 
 	ActiveConfig::<Test>::put(HostConfiguration {
-		paras_availability_period: 10,
 		minimum_validation_upgrade_delay: 11,
+		scheduler_params: SchedulerParams {
+			paras_availability_period: 10,
+			..Default::default()
+		},
 		..Default::default()
 	});
 	assert_err!(
@@ -283,12 +286,6 @@ fn setting_pending_config_members() {
 		max_code_size: 100_000,
 		max_pov_size: 1024,
 		max_head_data_size: 1_000,
-		coretime_cores: 2,
-		on_demand_retries: 5,
-		group_rotation_frequency: 20,
-		paras_availability_period: 10,
-		scheduling_lookahead: 3,
-		max_validators_per_core: None,
 		max_validators: None,
 		dispute_period: 239,
 		dispute_post_conclusion_acceptance_period: 10,
@@ -314,13 +311,21 @@ fn setting_pending_config_members() {
 		minimum_validation_upgrade_delay: 20,
 		executor_params: Default::default(),
 		approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 },
-		on_demand_queue_max_size: 10_000u32,
-		on_demand_base_fee: 10_000_000u128,
-		on_demand_fee_variability: Perbill::from_percent(3),
-		on_demand_target_queue_utilization: Perbill::from_percent(25),
-		on_demand_ttl: 5u32,
 		minimum_backing_votes: 5,
 		node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
+		scheduler_params: SchedulerParams {
+			group_rotation_frequency: 20,
+			paras_availability_period: 10,
+			max_validators_per_core: None,
+			lookahead: 3,
+			num_cores: 2,
+			max_availability_timeouts: 5,
+			on_demand_queue_max_size: 10_000u32,
+			on_demand_base_fee: 10_000_000u128,
+			on_demand_fee_variability: Perbill::from_percent(3),
+			on_demand_target_queue_utilization: Perbill::from_percent(25),
+			ttl: 5u32,
+		},
 	};
 
 	Configuration::set_validation_upgrade_cooldown(
@@ -342,13 +347,19 @@ fn setting_pending_config_members() {
 	Configuration::set_max_pov_size(RuntimeOrigin::root(), new_config.max_pov_size).unwrap();
 	Configuration::set_max_head_data_size(RuntimeOrigin::root(), new_config.max_head_data_size)
 		.unwrap();
-	Configuration::set_coretime_cores(RuntimeOrigin::root(), new_config.coretime_cores)
-		.unwrap();
-	Configuration::set_on_demand_retries(RuntimeOrigin::root(), new_config.on_demand_retries)
-		.unwrap();
+	Configuration::set_coretime_cores(
+		RuntimeOrigin::root(),
+		new_config.scheduler_params.num_cores,
+	)
+	.unwrap();
+	Configuration::set_max_availability_timeouts(
+		RuntimeOrigin::root(),
+		new_config.scheduler_params.max_availability_timeouts,
+	)
+	.unwrap();
 	Configuration::set_group_rotation_frequency(
 		RuntimeOrigin::root(),
-		new_config.group_rotation_frequency,
+		new_config.scheduler_params.group_rotation_frequency,
 	)
 	.unwrap();
 	// This comes out of order to satisfy the validity criteria for the chain and thread
@@ -360,17 +371,17 @@ fn setting_pending_config_members() {
 		.unwrap();
 	Configuration::set_paras_availability_period(
 		RuntimeOrigin::root(),
-		new_config.paras_availability_period,
+		new_config.scheduler_params.paras_availability_period,
 	)
 	.unwrap();
 	Configuration::set_scheduling_lookahead(
 		RuntimeOrigin::root(),
-		new_config.scheduling_lookahead,
+		new_config.scheduler_params.lookahead,
 	)
 	.unwrap();
 	Configuration::set_max_validators_per_core(
 		RuntimeOrigin::root(),
-		new_config.max_validators_per_core,
+		new_config.scheduler_params.max_validators_per_core,
 	)
 	.unwrap();
 	Configuration::set_max_validators(RuntimeOrigin::root(), new_config.max_validators)
diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs
index 9bc0a20ef5b4afce7d63da9bb1d231d39bf445ab..03fecc58570fba65df79ca48930acb3346d3f556 100644
--- a/polkadot/runtime/parachains/src/coretime/migration.rs
+++ b/polkadot/runtime/parachains/src/coretime/migration.rs
@@ -114,7 +114,7 @@ mod v_coretime {
 		let legacy_paras = paras::Parachains::<T>::get();
 		let config = <configuration::Pallet<T>>::config();
-		let total_core_count = config.coretime_cores + legacy_paras.len() as u32;
+		let total_core_count = config.scheduler_params.num_cores + legacy_paras.len() as u32;
 
 		let dmp_queue_size =
 			crate::dmp::Pallet::<T>::dmq_contents(T::BrokerId::get().into()).len() as u32;
@@ -150,7 +150,7 @@ mod v_coretime {
 	// Migrate to Coretime.
 	//
-	// NOTE: Also migrates coretime_cores config value in configuration::ActiveConfig.
+	// NOTE: Also migrates `num_cores` config value in configuration::ActiveConfig.
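+	// After the migration the active config's `scheduler_params.num_cores` counts both the
+	// cores formerly configured as `coretime_cores` and one core per legacy lease parachain
+	// (see `total_cores` below).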
 	fn migrate_to_coretime<
 		T: Config,
 		SendXcm: xcm::v4::SendXcm,
@@ -176,8 +176,8 @@ mod v_coretime {
 		}
 
 		let config = <configuration::Pallet<T>>::config();
-		// coretime_cores was on_demand_cores until now:
-		for on_demand in 0..config.coretime_cores {
+		// num_cores was on_demand_cores until now:
+		for on_demand in 0..config.scheduler_params.num_cores {
 			let core = CoreIndex(legacy_count.saturating_add(on_demand as _));
 			let r = assigner_coretime::Pallet::<T>::assign_core(
 				core,
@@ -189,9 +189,9 @@ mod v_coretime {
 				log::error!("Creating assignment for existing on-demand core, failed: {:?}", err);
 			}
 		}
-		let total_cores = config.coretime_cores + legacy_count;
+		let total_cores = config.scheduler_params.num_cores + legacy_count;
 		configuration::ActiveConfig::<T>::mutate(|c| {
-			c.coretime_cores = total_cores;
+			c.scheduler_params.num_cores = total_cores;
 		});
 
 		if let Err(err) = migrate_send_assignments_to_coretime_chain::<T>() {
@@ -200,7 +200,9 @@ mod v_coretime {
 		let single_weight = <T as Config>::WeightInfo::assign_core(1);
 		single_weight
-			.saturating_mul(u64::from(legacy_count.saturating_add(config.coretime_cores)))
+			.saturating_mul(u64::from(
+				legacy_count.saturating_add(config.scheduler_params.num_cores),
+			))
 			// Second read from sending assignments to the coretime chain.
 			.saturating_add(T::DbWeight::get().reads_writes(2, 1))
 	}
@@ -244,7 +246,8 @@ mod v_coretime {
 			Some(mk_coretime_call(crate::coretime::CoretimeCalls::SetLease(p.into(), time_slice)))
 		});
 
-		let core_count: u16 = configuration::Pallet::<T>::config().coretime_cores.saturated_into();
+		let core_count: u16 =
+			configuration::Pallet::<T>::config().scheduler_params.num_cores.saturated_into();
 		let set_core_count = iter::once(mk_coretime_call(
 			crate::coretime::CoretimeCalls::NotifyCoreCount(core_count),
 		));
diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs
index 531f5c2e4e470095989bbb73429cc2180ff4f321..eb9646d7e869d35763a5b0cdf8a408de66126b78 100644
--- a/polkadot/runtime/parachains/src/coretime/mod.rs
+++ b/polkadot/runtime/parachains/src/coretime/mod.rs
@@ -214,8 +214,8 @@ impl<T: Config> Pallet<T> {
 	}
 
 	pub fn initializer_on_new_session(notification: &SessionChangeNotification<BlockNumberFor<T>>) {
-		let old_core_count = notification.prev_config.coretime_cores;
-		let new_core_count = notification.new_config.coretime_cores;
+		let old_core_count = notification.prev_config.scheduler_params.num_cores;
+		let new_core_count = notification.new_config.scheduler_params.num_cores;
 		if new_core_count != old_core_count {
 			let core_count: u16 = new_core_count.saturated_into();
 			let message = Xcm(vec![
diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs
index c2383dad3053882b7abec959446b23d149aa4a5f..da95b060c14a3fad03e98f5e2a75ac54eef1928c 100644
--- a/polkadot/runtime/parachains/src/disputes.rs
+++ b/polkadot/runtime/parachains/src/disputes.rs
@@ -379,7 +379,7 @@ pub mod pallet {
 		type WeightInfo: WeightInfo;
 	}
 
-	/// The current storage version.
+	/// The in-code storage version.
 	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
 
 	#[pallet::pallet]
diff --git a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs
index 2cb49c88d437cb3e0a4c01a40d343a179316ec9d..c6baf2f30cf58279b58b644df76e5ea255f7ec4f 100644
--- a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs
+++ b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs
@@ -22,7 +22,7 @@ use crate::{
 	paras::{Pallet as Paras, ParaKind, ParachainsCache},
 	shared::Pallet as Shared,
 };
-use frame_benchmarking::{impl_benchmark_test_suite, v2::*, whitelisted_caller};
+use frame_benchmarking::{v2::*, whitelisted_caller};
 use frame_support::{assert_ok, traits::Currency};
 
 type BalanceOf<T> =
diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs
index 90af9cde00a8faaec13101b490ea3ba92f4a827c..16e2e93b5617f2c85bbb308ff5d057c7ed99db9c 100644
--- a/polkadot/runtime/parachains/src/inclusion/mod.rs
+++ b/polkadot/runtime/parachains/src/inclusion/mod.rs
@@ -47,10 +47,7 @@ use scale_info::TypeInfo;
 use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating};
 #[cfg(feature = "std")]
 use sp_std::fmt;
-use sp_std::{
-	collections::{btree_map::BTreeMap, btree_set::BTreeSet},
-	prelude::*,
-};
+use sp_std::{collections::btree_set::BTreeSet, prelude::*};
 
 pub use pallet::*;
 
@@ -601,18 +598,16 @@ impl<T: Config> Pallet<T> {
 	/// scheduled cores. If these conditions are not met, the execution of the function fails.
 	pub(crate) fn process_candidates<GV>(
 		allowed_relay_parents: &AllowedRelayParentsTracker<T::Hash, BlockNumberFor<T>>,
-		candidates: Vec<BackedCandidate<T::Hash>>,
-		scheduled: &BTreeMap<ParaId, CoreIndex>,
+		candidates: Vec<(BackedCandidate<T::Hash>, CoreIndex)>,
 		group_validators: GV,
+		core_index_enabled: bool,
 	) -> Result<ProcessedCandidates<T::Hash>, DispatchError>
 	where
 		GV: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>,
 	{
 		let now = <frame_system::Pallet<T>>::block_number();
 
-		ensure!(candidates.len() <= scheduled.len(), Error::<T>::UnscheduledCandidate);
-
-		if scheduled.is_empty() {
+		if candidates.is_empty() {
 			return Ok(ProcessedCandidates::default())
 		}
 
@@ -648,7 +643,7 @@ impl<T: Config> Pallet<T> {
 		//
 		// In the meantime, we do certain sanity checks on the candidates and on the scheduled
 		// list.
-		for (candidate_idx, backed_candidate) in candidates.iter().enumerate() {
+		for (candidate_idx, (backed_candidate, core_index)) in candidates.iter().enumerate() {
 			let relay_parent_hash = backed_candidate.descriptor().relay_parent;
 			let para_id = backed_candidate.descriptor().para_id;
 
@@ -663,7 +658,7 @@ impl<T: Config> Pallet<T> {
 			let relay_parent_number = match check_ctx.verify_backed_candidate(
 				&allowed_relay_parents,
 				candidate_idx,
-				backed_candidate,
+				backed_candidate.candidate(),
 			)? {
 				Err(FailedToCreatePVD) => {
 					log::debug!(
@@ -679,11 +674,22 @@ impl<T: Config> Pallet<T> {
 				Ok(rpn) => rpn,
 			};
 
-			let para_id = backed_candidate.descriptor().para_id;
+			let (validator_indices, _) =
+				backed_candidate.validator_indices_and_core_index(core_index_enabled);
+
+			log::debug!(
+				target: LOG_TARGET,
+				"Candidate {:?} on {:?},
+				core_index_enabled = {}",
+				backed_candidate.hash(),
+				core_index,
+				core_index_enabled
+			);
+
+			check_assignment_in_order(core_index)?;
+
 			let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()];
 
-			let core_idx = *scheduled.get(&para_id).ok_or(Error::<T>::UnscheduledCandidate)?;
-			check_assignment_in_order(core_idx)?;
 			ensure!(
 				<PendingAvailability<T>>::get(&para_id).is_none() &&
 					<PendingCommitments<T>>::get(&para_id).is_none(),
@@ -694,7 +700,7 @@ impl<T: Config> Pallet<T> {
 			// assigned to core at block `N + 1`. Thus, `relay_parent_number + 1`
 			// will always land in the current session.
let group_idx = >::group_assigned_to_core( - core_idx, + *core_index, relay_parent_number + One::one(), ) .ok_or_else(|| { @@ -711,7 +717,9 @@ impl Pallet { // check the signatures in the backing and that it is a majority. { let maybe_amount_validated = primitives::check_candidate_backing( - &backed_candidate, + backed_candidate.candidate().hash(), + backed_candidate.validity_votes(), + validator_indices, &signing_context, group_vals.len(), |intra_group_vi| { @@ -738,16 +746,15 @@ impl Pallet { let mut backer_idx_and_attestation = Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( - backed_candidate.validator_indices.count_ones(), + validator_indices.count_ones(), ); let candidate_receipt = backed_candidate.receipt(); - for ((bit_idx, _), attestation) in backed_candidate - .validator_indices + for ((bit_idx, _), attestation) in validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes.iter().cloned()) + .zip(backed_candidate.validity_votes().iter().cloned()) { let val_idx = group_vals.get(bit_idx).expect("this query succeeded above; qed"); @@ -760,7 +767,7 @@ impl Pallet { } core_indices_and_backers.push(( - (core_idx, para_id), + (*core_index, para_id), backers, group_idx, relay_parent_number, @@ -772,7 +779,7 @@ impl Pallet { // one more sweep for actually writing to storage. let core_indices = core_indices_and_backers.iter().map(|(c, ..)| *c).collect(); - for (candidate, (core, backers, group, relay_parent_number)) in + for ((candidate, _), (core, backers, group, relay_parent_number)) in candidates.into_iter().zip(core_indices_and_backers) { let para_id = candidate.descriptor().para_id; @@ -782,16 +789,18 @@ impl Pallet { bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; Self::deposit_event(Event::::CandidateBacked( - candidate.candidate.to_plain(), - candidate.candidate.commitments.head_data.clone(), + candidate.candidate().to_plain(), + candidate.candidate().commitments.head_data.clone(), core.0, group, )); - let candidate_hash = candidate.candidate.hash(); + let candidate_hash = candidate.candidate().hash(); - let (descriptor, commitments) = - (candidate.candidate.descriptor, candidate.candidate.commitments); + let (descriptor, commitments) = ( + candidate.candidate().descriptor.clone(), + candidate.candidate().commitments.clone(), + ); >::insert( ¶_id, @@ -1195,10 +1204,10 @@ impl CandidateCheckContext { &self, allowed_relay_parents: &AllowedRelayParentsTracker>, candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>, + backed_candidate_receipt: &CommittedCandidateReceipt<::Hash>, ) -> Result, FailedToCreatePVD>, Error> { - let para_id = backed_candidate.descriptor().para_id; - let relay_parent = backed_candidate.descriptor().relay_parent; + let para_id = backed_candidate_receipt.descriptor().para_id; + let relay_parent = backed_candidate_receipt.descriptor().relay_parent; // Check that the relay-parent is one of the allowed relay-parents. 
let (relay_parent_storage_root, relay_parent_number) = { @@ -1223,13 +1232,13 @@ impl CandidateCheckContext { let expected = persisted_validation_data.hash(); ensure!( - expected == backed_candidate.descriptor().persisted_validation_data_hash, + expected == backed_candidate_receipt.descriptor().persisted_validation_data_hash, Error::::ValidationDataHashMismatch, ); } ensure!( - backed_candidate.descriptor().check_collator_signature().is_ok(), + backed_candidate_receipt.descriptor().check_collator_signature().is_ok(), Error::::NotCollatorSigned, ); @@ -1237,25 +1246,25 @@ impl CandidateCheckContext { // A candidate for a parachain without current validation code is not scheduled. .ok_or_else(|| Error::::UnscheduledCandidate)?; ensure!( - backed_candidate.descriptor().validation_code_hash == validation_code_hash, + backed_candidate_receipt.descriptor().validation_code_hash == validation_code_hash, Error::::InvalidValidationCodeHash, ); ensure!( - backed_candidate.descriptor().para_head == - backed_candidate.candidate.commitments.head_data.hash(), + backed_candidate_receipt.descriptor().para_head == + backed_candidate_receipt.commitments.head_data.hash(), Error::::ParaHeadMismatch, ); if let Err(err) = self.check_validation_outputs( para_id, relay_parent_number, - &backed_candidate.candidate.commitments.head_data, - &backed_candidate.candidate.commitments.new_validation_code, - backed_candidate.candidate.commitments.processed_downward_messages, - &backed_candidate.candidate.commitments.upward_messages, - BlockNumberFor::::from(backed_candidate.candidate.commitments.hrmp_watermark), - &backed_candidate.candidate.commitments.horizontal_messages, + &backed_candidate_receipt.commitments.head_data, + &backed_candidate_receipt.commitments.new_validation_code, + backed_candidate_receipt.commitments.processed_downward_messages, + &backed_candidate_receipt.commitments.upward_messages, + BlockNumberFor::::from(backed_candidate_receipt.commitments.hrmp_watermark), + &backed_candidate_receipt.commitments.horizontal_messages, ) { log::debug!( target: LOG_TARGET, diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 557b7b71a9e326af0ab455619fbfe4fddb34d599..3fe7d7f0c7d4698c6b8301a41dca552e9c067509 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -47,10 +47,10 @@ use test_helpers::{dummy_collator, dummy_collator_signature, dummy_validation_co fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); - config.coretime_cores = 1; + config.scheduler_params.num_cores = 1; config.max_code_size = 0b100000; config.max_head_data_size = 0b100000; - config.group_rotation_frequency = u32::MAX; + config.scheduler_params.group_rotation_frequency = u32::MAX; config } @@ -120,6 +120,7 @@ pub(crate) fn back_candidate( keystore: &KeystorePtr, signing_context: &SigningContext, kind: BackingKind, + core_index: Option, ) -> BackedCandidate { let mut validator_indices = bitvec::bitvec![u8, BitOrderLsb0; 0; group.len()]; let threshold = effective_minimum_backing_votes( @@ -155,15 +156,20 @@ pub(crate) fn back_candidate( validity_votes.push(ValidityAttestation::Explicit(signature).into()); } - let backed = BackedCandidate { candidate, validity_votes, validator_indices }; + let backed = + BackedCandidate::new(candidate, validity_votes, validator_indices.clone(), core_index); - let successfully_backed = - primitives::check_candidate_backing(&backed, 
signing_context, group.len(), |i| { - Some(validators[group[i].0 as usize].public().into()) - }) - .ok() - .unwrap_or(0) >= - threshold; + let successfully_backed = primitives::check_candidate_backing( + backed.candidate().hash(), + backed.validity_votes(), + validator_indices.as_bitslice(), + signing_context, + group.len(), + |i| Some(validators[group[i].0 as usize].public().into()), + ) + .ok() + .unwrap_or(0) >= + threshold; match kind { BackingKind::Unanimous | BackingKind::Threshold => assert!(successfully_backed), @@ -218,7 +224,7 @@ pub(crate) fn run_to_block( } pub(crate) fn expected_bits() -> usize { - Paras::parachains().len() + Configuration::config().coretime_cores as usize + Paras::parachains().len() + Configuration::config().scheduler_params.num_cores as usize } fn default_bitfield() -> AvailabilityBitfield { @@ -380,7 +386,7 @@ fn collect_pending_cleans_up_pending() { (thread_a, ParaKind::Parathread), ]; let mut config = genesis_config(paras); - config.configuration.config.group_rotation_frequency = 3; + config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { let default_candidate = TestCandidateBuilder::default().build(); >::insert( @@ -919,38 +925,16 @@ fn candidate_checks() { let thread_a_assignment = (thread_a, CoreIndex::from(2)); let allowed_relay_parents = default_allowed_relay_parent_tracker(); - // unscheduled candidate. - { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - ); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![backed], - &[chain_b_assignment].into_iter().collect(), - &group_validators, - ), - Error::::UnscheduledCandidate - ); - } + // no candidates. + assert_eq!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + vec![], + &group_validators, + false + ), + Ok(ProcessedCandidates::default()) + ); // candidates out of order. { @@ -984,6 +968,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -993,15 +978,16 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); // out-of-order manifests as unscheduled. 
assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_b, backed_a], - &[chain_a_assignment, chain_b_assignment].into_iter().collect(), + vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], &group_validators, + false ), Error::::ScheduledOutOfOrder ); @@ -1027,14 +1013,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Lacking, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::InsufficientBacking ); @@ -1075,6 +1062,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1084,14 +1072,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_b, backed_a], - &[chain_a_assignment, chain_b_assignment].into_iter().collect(), + vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], &group_validators, + false ), Error::::DisallowedRelayParent ); @@ -1122,14 +1111,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[thread_a_assignment].into_iter().collect(), + vec![(backed, thread_a_assignment.1)], &group_validators, + false ), Error::::NotCollatorSigned ); @@ -1156,6 +1146,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let candidate = TestCandidateBuilder::default().build(); @@ -1177,9 +1168,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::CandidateScheduledBeforeParaFree ); @@ -1212,14 +1203,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::CandidateScheduledBeforeParaFree ); @@ -1249,6 +1241,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); { @@ -1267,9 +1260,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::PrematureCodeUpgrade ); @@ -1296,14 +1289,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_eq!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Err(Error::::ValidationDataHashMismatch.into()), ); @@ -1331,14 +1325,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::InvalidValidationCodeHash ); @@ -1366,14 +1361,15 @@ fn 
candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::ParaHeadMismatch ); @@ -1486,6 +1482,7 @@ fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1495,6 +1492,7 @@ fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_c = back_candidate( @@ -1504,15 +1502,20 @@ fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); - let backed_candidates = vec![backed_a.clone(), backed_b.clone(), backed_c]; + let backed_candidates = vec![ + (backed_a.clone(), chain_a_assignment.1), + (backed_b.clone(), chain_b_assignment.1), + (backed_c, thread_a_assignment.1), + ]; let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates .iter() .enumerate() - .map(|(idx, backed_candidate)| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1532,10 +1535,8 @@ fn backing_works() { } = ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - &[chain_a_assignment, chain_b_assignment, thread_a_assignment] - .into_iter() - .collect(), &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); @@ -1554,22 +1555,22 @@ fn backing_works() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|backed_candidate| { + backed_candidates.into_iter().for_each(|(backed_candidate, _)| { let candidate_receipt_with_backers = intermediate .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); - - assert_eq!( - backed_candidate.validity_votes.len(), - backed_candidate.validator_indices.count_ones() - ); + let (validator_indices, None) = + backed_candidate.validator_indices_and_core_index(false) + else { + panic!("Expected no injected core index") + }; + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); candidate_receipt_with_backers.1.extend( - backed_candidate - .validator_indices + validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes.iter().cloned()) + .zip(backed_candidate.validity_votes().iter().cloned()) .filter_map(|((validator_index_within_group, _), attestation)| { let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); group_validators(grp_idx).map(|validator_indices| { @@ -1666,6 +1667,257 @@ fn backing_works() { }); } +#[test] +fn backing_works_with_elastic_scaling_mvp() { + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + let thread_a = ParaId::from(3_u32); + + // The block number of the relay-parent for testing. 
+ const RELAY_PARENT_NUM: BlockNumber = 4; + + let paras = vec![ + (chain_a, ParaKind::Parachain), + (chain_b, ParaKind::Parachain), + (thread_a, ParaKind::Parathread), + ]; + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + let validator_public = validator_pubkeys(&validators); + + new_test_ext(genesis_config(paras)).execute_with(|| { + shared::Pallet::::set_active_validators_ascending(validator_public.clone()); + shared::Pallet::::set_session_index(5); + + run_to_block(5, |_| None); + + let signing_context = + SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; + + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), + group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), + group_index if group_index == GroupIndex::from(2) => Some(vec![4]), + _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + } + .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) + }; + + // When processing candidates, we compute the group index from scheduler. + let validator_groups = vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2), ValidatorIndex(3)], + vec![ValidatorIndex(4)], + ]; + Scheduler::set_validator_groups(validator_groups); + + let allowed_relay_parents = default_allowed_relay_parent_tracker(); + + let mut candidate_a = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(1), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + + let mut candidate_b_1 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(2), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_1); + + let mut candidate_b_2 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(3), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_2); + + let backed_a = back_candidate( + candidate_a.clone(), + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + let backed_b_1 = back_candidate( + candidate_b_1.clone(), + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + Some(CoreIndex(1)), + ); + + let backed_b_2 = back_candidate( + candidate_b_2.clone(), + &validators, + group_validators(GroupIndex::from(2)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + Some(CoreIndex(2)), + ); + + let backed_candidates = vec![ + (backed_a.clone(), CoreIndex(0)), + (backed_b_1.clone(), CoreIndex(1)), + 
(backed_b_2.clone(), CoreIndex(2)), + ]; + let get_backing_group_idx = { + // the order defines the group implicitly for this test case + let backed_candidates_with_groups = backed_candidates + .iter() + .enumerate() + .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) + .collect::>(); + + move |candidate_hash_x: CandidateHash| -> Option { + backed_candidates_with_groups.iter().find_map(|(candidate_hash, grp)| { + if *candidate_hash == candidate_hash_x { + Some(*grp) + } else { + None + } + }) + } + }; + + let ProcessedCandidates { + core_indices: occupied_cores, + candidate_receipt_with_backing_validator_indices, + } = ParaInclusion::process_candidates( + &allowed_relay_parents, + backed_candidates.clone(), + &group_validators, + true, + ) + .expect("candidates scheduled, in order, and backed"); + + // Both b candidates will be backed. However, only one will be recorded on-chain and proceed + // with being made available. + assert_eq!( + occupied_cores, + vec![ + (CoreIndex::from(0), chain_a), + (CoreIndex::from(1), chain_b), + (CoreIndex::from(2), chain_b), + ] + ); + + // Transform the votes into the setup we expect + let mut expected = std::collections::HashMap::< + CandidateHash, + (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), + >::new(); + backed_candidates.into_iter().for_each(|(backed_candidate, _)| { + let candidate_receipt_with_backers = expected + .entry(backed_candidate.hash()) + .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); + let (validator_indices, _maybe_core_index) = + backed_candidate.validator_indices_and_core_index(true); + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); + candidate_receipt_with_backers.1.extend( + validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + .filter_map(|((validator_index_within_group, _), attestation)| { + let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); + group_validators(grp_idx).map(|validator_indices| { + (validator_indices[validator_index_within_group], attestation) + }) + }), + ); + }); + + assert_eq!( + expected, + candidate_receipt_with_backing_validator_indices + .into_iter() + .map(|c| (c.0.hash(), c)) + .collect() + ); + + let backers = { + let num_backers = effective_minimum_backing_votes( + group_validators(GroupIndex(0)).unwrap().len(), + configuration::Pallet::::config().minimum_backing_votes, + ); + backing_bitfield(&(0..num_backers).collect::>()) + }; + assert_eq!( + >::get(&chain_a), + Some(CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + }) + ); + assert_eq!( + >::get(&chain_a), + Some(candidate_a.commitments), + ); + + // Only one candidate for b will be recorded on chain. 
+ assert_eq!( + >::get(&chain_b), + Some(CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_b_2.hash(), + descriptor: candidate_b_2.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + }) + ); + assert_eq!( + >::get(&chain_b), + Some(candidate_b_2.commitments), + ); + }); +} + #[test] fn can_include_candidate_with_ok_code_upgrade() { let chain_a = ParaId::from(1_u32); @@ -1740,14 +1992,15 @@ fn can_include_candidate_with_ok_code_upgrade() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let ProcessedCandidates { core_indices: occupied_cores, .. } = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_a], - &[chain_a_assignment].into_iter().collect(), + vec![(backed_a, chain_a_assignment.1)], &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); @@ -1809,7 +2062,7 @@ fn check_allowed_relay_parents() { } let validator_public = validator_pubkeys(&validators); let mut config = genesis_config(paras); - config.configuration.config.group_rotation_frequency = 1; + config.configuration.config.scheduler_params.group_rotation_frequency = 1; new_test_ext(config).execute_with(|| { shared::Pallet::::set_active_validators_ascending(validator_public.clone()); @@ -1932,6 +2185,7 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_a, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1941,6 +2195,7 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_b, BackingKind::Threshold, + None, ); let backed_c = back_candidate( @@ -1950,17 +2205,20 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_c, BackingKind::Threshold, + None, ); - let backed_candidates = vec![backed_a, backed_b, backed_c]; + let backed_candidates = vec![ + (backed_a, chain_a_assignment.1), + (backed_b, chain_b_assignment.1), + (backed_c, thread_a_assignment.1), + ]; ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - &[chain_a_assignment, chain_b_assignment, thread_a_assignment] - .into_iter() - .collect(), &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); }); @@ -2189,14 +2447,15 @@ fn para_upgrade_delay_scheduled_from_inclusion() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let ProcessedCandidates { core_indices: occupied_cores, .. 
} = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_a], - &[chain_a_assignment].into_iter().collect(), + vec![(backed_a, chain_a_assignment.1)], &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 1925ca19501accce8148e23d5e34c8f6625097a2..e18be03bdeb230d8959e3a1e76828d499c1c52bb 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -23,7 +23,7 @@ use crate::{ initializer, origin, paras, paras::ParaKind, paras_inherent, scheduler, - scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::AssignmentProvider, session_info, shared, ParaId, }; use frame_support::pallet_prelude::*; @@ -463,10 +463,6 @@ pub mod mock_assigner { pub(super) type MockAssignmentQueue = StorageValue<_, VecDeque, ValueQuery>; - #[pallet::storage] - pub(super) type MockProviderConfig = - StorageValue<_, AssignmentProviderConfig, OptionQuery>; - #[pallet::storage] pub(super) type MockCoreCount = StorageValue<_, u32, OptionQuery>; } @@ -478,12 +474,6 @@ pub mod mock_assigner { MockAssignmentQueue::::mutate(|queue| queue.push_back(assignment)); } - // This configuration needs to be customized to service `get_provider_config` in - // scheduler tests. - pub fn set_assignment_provider_config(config: AssignmentProviderConfig) { - MockProviderConfig::::set(Some(config)); - } - // Allows for customized core count in scheduler tests, rather than a core count // derived from on-demand config + parachain count. pub fn set_core_count(count: u32) { @@ -512,17 +502,6 @@ pub mod mock_assigner { // in the mock assigner. fn push_back_assignment(_assignment: Assignment) {} - // Gets the provider config we set earlier using `set_assignment_provider_config`, falling - // back to the on demand parachain configuration if none was set. - fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig { - match MockProviderConfig::::get() { - Some(config) => config, - None => AssignmentProviderConfig { - max_availability_timeouts: 1, - ttl: BlockNumber::from(5u32), - }, - } - } #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Assignment { Assignment::Bulk(para_id) diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 39371391b1f09cc57d919ea85a7591689e2e04d6..5acdb9683bbf17349f35f280c6a268c49ebb5f39 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -1374,7 +1374,7 @@ impl Pallet { outgoing } - // note replacement of the code of para with given `id`, which occured in the + // note replacement of the code of para with given `id`, which occurred in the // context of the given relay-chain block number. provide the replaced code. 
// // `at` for para-triggered replacement is the block number of the relay-chain diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index 24ea919ec8754a50d6fb9507739a48c8a3eb1120..262ec9d3fdba95c0368d4cfe03b288ccaeb049d1 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -17,7 +17,7 @@ use super::*; use frame_support::{assert_err, assert_ok, assert_storage_noop}; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, PARACHAIN_KEY_TYPE_ID}; +use primitives::{vstaging::SchedulerParams, BlockNumber, PARACHAIN_KEY_TYPE_ID}; use sc_keystore::LocalKeystore; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; @@ -909,7 +909,10 @@ fn full_parachain_cleanup_storage() { minimum_validation_upgrade_delay: 2, // Those are not relevant to this test. However, HostConfiguration is still a // subject for the consistency check. - paras_availability_period: 1, + scheduler_params: SchedulerParams { + paras_availability_period: 1, + ..Default::default() + }, ..Default::default() }, }, diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 0f6b23ae1b39213f8afbd37798f8f7f31a024cf9..ad3fa8e0dc712b64141231250e251498803c52b1 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -120,7 +120,7 @@ benchmarks! { // with `v` validity votes. // let votes = v as usize; let votes = min(scheduler::Pallet::::group_validators(GroupIndex::from(0)).unwrap().len(), v as usize); - assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), votes); + assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes().len(), votes); benchmark.bitfields.clear(); benchmark.disputes.clear(); @@ -177,7 +177,7 @@ benchmarks! 
{ // There is 1 backed assert_eq!(benchmark.backed_candidates.len(), 1); assert_eq!( - benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), + benchmark.backed_candidates.get(0).unwrap().validity_votes().len(), votes, ); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 81e092f0a991cd216bac3c1d8e48ae8bcea59144..723a15bdba7a88d73a2610b90910f53c6150475f 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -43,15 +43,14 @@ use frame_support::{ use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ - effective_minimum_backing_votes, BackedCandidate, CandidateHash, CandidateReceipt, - CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CoreIndex, DisputeStatementSet, - InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, - SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, - UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, - PARACHAINS_INHERENT_IDENTIFIER, + effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, + CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, + CoreIndex, DisputeStatementSet, InherentData as ParachainsInherentData, + MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, + SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, + ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, }; use rand::{seq::SliceRandom, SeedableRng}; - use scale_info::TypeInfo; use sp_runtime::traits::{Header as HeaderT, One}; use sp_std::{ @@ -145,6 +144,10 @@ pub mod pallet { DisputeInvalid, /// A candidate was backed by a disabled validator BackedByDisabled, + /// A candidate was backed even though the paraid was not scheduled. + BackedOnUnscheduledCore, + /// Too many candidates supplied. + UnscheduledCandidate, } /// Whether the paras inherent was included within this block. 
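For intuition on the accessor changes above (`validity_votes()` instead of direct field access): with the `ElasticScalingMVP` node feature, a `BackedCandidate` can carry the core index it was backed for as an 8-bit extension of its `validator_indices` bitfield, which later hunks read back via `validator_indices_and_core_index`. A self-contained sketch of such a split, assuming the extension occupies the trailing 8 bits in least-significant-bit-first order (the exact layout is an assumption of this sketch, not taken from the diff):

```rust
/// Hypothetical illustration: split a backing bitfield into validator votes
/// and an optional trailing 8-bit core index.
fn split_validator_indices_and_core_index(
    bits: &[bool],
    core_index_enabled: bool,
) -> (&[bool], Option<u32>) {
    if core_index_enabled && bits.len() >= 8 {
        let (votes, core_bits) = bits.split_at(bits.len() - 8);
        // Assemble the core index from the extension bits, LSB first.
        let core = core_bits
            .iter()
            .enumerate()
            .fold(0u32, |acc, (i, &bit)| acc | ((bit as u32) << i));
        (votes, Some(core))
    } else {
        // Without the feature bit, the whole field is validator votes.
        (bits, None)
    }
}

fn main() {
    // Two backing votes plus core index 2 encoded in the trailing 8 bits.
    let mut field = vec![true, true];
    field.extend([false, true, false, false, false, false, false, false]);
    assert_eq!(
        split_validator_indices_and_core_index(&field, true),
        (&[true, true][..], Some(2))
    );
    assert_eq!(split_validator_indices_and_core_index(&field, false).1, None);
}
```

The length check at the end of `get_injected_core_index` below follows the same idea: after stripping the extension, the remaining bits must match the backing group size.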
@@ -585,25 +588,39 @@ impl Pallet { let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); >::free_cores_and_fill_claimqueue(freed, now); - let scheduled = >::scheduled_paras() - .map(|(core_idx, para_id)| (para_id, core_idx)) - .collect(); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); - let SanitizedBackedCandidates { backed_candidates, votes_from_disabled_were_dropped } = - sanitize_backed_candidates::( - backed_candidates, - &allowed_relay_parents, - |candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>| - -> bool { - let para_id = backed_candidate.descriptor().para_id; - let prev_context = >::para_most_recent_context(para_id); - let check_ctx = CandidateCheckContext::::new(prev_context); - - // never include a concluded-invalid candidate - current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || + let core_index_enabled = configuration::Pallet::::config() + .node_features + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + let mut scheduled: BTreeMap> = BTreeMap::new(); + let mut total_scheduled_cores = 0; + + for (core_idx, para_id) in >::scheduled_paras() { + total_scheduled_cores += 1; + scheduled.entry(para_id).or_default().insert(core_idx); + } + + let SanitizedBackedCandidates { + backed_candidates_with_core, + votes_from_disabled_were_dropped, + dropped_unscheduled_candidates, + } = sanitize_backed_candidates::( + backed_candidates, + &allowed_relay_parents, + |candidate_idx: usize, + backed_candidate: &BackedCandidate<::Hash>| + -> bool { + let para_id = backed_candidate.descriptor().para_id; + let prev_context = >::para_most_recent_context(para_id); + let check_ctx = CandidateCheckContext::::new(prev_context); + + // never include a concluded-invalid candidate + current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || // Instead of checking the candidates with code upgrades twice // move the checking up here and skip it in the training wheels fallback. // That way we avoid possible duplicate checks while assuring all @@ -611,13 +628,19 @@ impl Pallet { // // NOTE: this is the only place where we check the relay-parent. check_ctx - .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate) + .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) .is_err() - }, - &scheduled, - ); + }, + scheduled, + core_index_enabled, + ); - METRICS.on_candidates_sanitized(backed_candidates.len() as u64); + ensure!( + backed_candidates_with_core.len() <= total_scheduled_cores, + Error::::UnscheduledCandidate + ); + + METRICS.on_candidates_sanitized(backed_candidates_with_core.len() as u64); // In `Enter` context (invoked during execution) there should be no backing votes from // disabled validators because they should have been filtered out during inherent data @@ -626,15 +649,22 @@ impl Pallet { ensure!(!votes_from_disabled_were_dropped, Error::::BackedByDisabled); } + // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates + // due to a para not being scheduled. They have been filtered during inherent data + // preparation (`ProvideInherent` context). Abort in such cases. + if context == ProcessInherentDataContext::Enter { + ensure!(!dropped_unscheduled_candidates, Error::::BackedOnUnscheduledCore); + } + // Process backed candidates according to scheduled cores. 
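The data-structure change in the hunk above is the heart of the elastic-scaling MVP: `scheduled` becomes a `ParaId -> BTreeSet<CoreIndex>` map rather than one core per para, because a para may now be scheduled on several cores in the same relay-chain block. A toy sketch of the map building, with plain `u32` standing in for `ParaId` and `CoreIndex`:

```rust
use std::collections::{BTreeMap, BTreeSet};

fn main() {
    // (core, para) pairs as `scheduled_paras()` would yield them.
    let scheduled_pairs = [(0u32, 1u32), (1, 1), (2, 2)];

    let mut scheduled: BTreeMap<u32, BTreeSet<u32>> = BTreeMap::new();
    let mut total_scheduled_cores = 0;
    for (core, para) in scheduled_pairs {
        total_scheduled_cores += 1;
        scheduled.entry(para).or_default().insert(core);
    }

    // Para 1 holds two cores at once, so the sanitized candidate count may
    // exceed the para count but must never exceed `total_scheduled_cores`,
    // which is what the `UnscheduledCandidate` check enforces.
    assert_eq!(total_scheduled_cores, 3);
    assert_eq!(scheduled[&1], BTreeSet::from([0, 1]));
    assert_eq!(scheduled[&2], BTreeSet::from([2]));
}
```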
let inclusion::ProcessedCandidates::< as HeaderT>::Hash> { core_indices: occupied, candidate_receipt_with_backing_validator_indices, } = >::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), - &scheduled, + backed_candidates_with_core.clone(), >::group_validators, + core_index_enabled, )?; // Note which of the scheduled cores were actually occupied by a backed candidate. >::occupied(occupied.into_iter().map(|e| (e.0, e.1)).collect()); @@ -651,8 +681,15 @@ impl Pallet { let bitfields = bitfields.into_iter().map(|v| v.into_unchecked()).collect(); - let processed = - ParachainsInherentData { bitfields, backed_candidates, disputes, parent_header }; + let processed = ParachainsInherentData { + bitfields, + backed_candidates: backed_candidates_with_core + .into_iter() + .map(|(candidate, _)| candidate) + .collect(), + disputes, + parent_header, + }; Ok((processed, Some(all_weight_after).into())) } } @@ -751,7 +788,7 @@ fn random_sel Weight>( /// Assumes disputes are already filtered by the time this is called. /// /// Returns the total weight consumed by `bitfields` and `candidates`. -fn apply_weight_limit( +pub(crate) fn apply_weight_limit( candidates: &mut Vec::Hash>>, bitfields: &mut UncheckedSignedAvailabilityBitfields, max_consumable_weight: Weight, @@ -768,35 +805,71 @@ fn apply_weight_limit( return total } - // Prefer code upgrades, they tend to be large and hence stand no chance to be picked - // late while maintaining the weight bounds. - let preferred_indices = candidates + // Invariant: the block author provides candidates in the order in which they form a chain + // w.r.t. elastic scaling. If the invariant is broken, we'd fail later when filtering candidates + // which are unchained. + + let mut chained_candidates: Vec> = Vec::new(); + let mut current_para_id = None; + + for candidate in sp_std::mem::take(candidates).into_iter() { + let candidate_para_id = candidate.descriptor().para_id; + if Some(candidate_para_id) == current_para_id { + let chain = chained_candidates + .last_mut() + .expect("if the current_para_id is Some, then vec is not empty; qed"); + chain.push(candidate); + } else { + current_para_id = Some(candidate_para_id); + chained_candidates.push(vec![candidate]); + } + } + + // Elastic scaling: we prefer chains that have a code upgrade among the candidates, + // as the candidates containing the upgrade tend to be large and hence stand no chance to + // be picked late while maintaining the weight bounds. + // + // Limitation: for simplicity, if the total weight of a chain of candidates is larger than + // the remaining weight, the whole chain is dropped, even though including a prefix of it + // might still be possible. + let preferred_chain_indices = chained_candidates + .iter() + .enumerate() + .filter_map(|(idx, candidates)| { + // Check if any of the candidates in the chain contains a code upgrade. + if candidates + .iter() + .any(|candidate| candidate.candidate().commitments.new_validation_code.is_some()) + { + Some(idx) + } else { + None + } + }) + .collect::>(); - // There is weight remaining to be consumed by a subset of candidates + // There is weight remaining to be consumed by a subset of chained candidates // which are going to be picked now.
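A self-contained toy version of the chain-grouping loop above (`u32` standing in for both the para id and the candidate): consecutive candidates of the same para form one chain, so an interleaved ordering splits a para's candidates into several chains, which later filtering would reject.

```rust
/// Group consecutive equal para ids into chains, mirroring the loop above.
fn group_into_chains(paras: Vec<u32>) -> Vec<Vec<u32>> {
    let mut chains: Vec<Vec<u32>> = Vec::new();
    let mut current_para = None;
    for para in paras {
        if Some(para) == current_para {
            chains
                .last_mut()
                .expect("current_para is Some, so a chain exists; qed")
                .push(para);
        } else {
            current_para = Some(para);
            chains.push(vec![para]);
        }
    }
    chains
}

fn main() {
    // Well-ordered input: one chain per para.
    assert_eq!(group_into_chains(vec![1, 1, 2]), vec![vec![1, 1], vec![2]]);
    // Interleaved input: para 1 is split into two chains.
    assert_eq!(group_into_chains(vec![1, 2, 1]), vec![vec![1], vec![2], vec![1]]);
}
```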
if let Some(max_consumable_by_candidates) = max_consumable_weight.checked_sub(&total_bitfields_weight) { - let (acc_candidate_weight, indices) = - random_sel::::Hash>, _>( + let (acc_candidate_weight, chained_indices) = + random_sel::::Hash>>, _>( rng, - &candidates, - preferred_indices, - |c| backed_candidate_weight::(c), + &chained_candidates, + preferred_chain_indices, + |candidates| backed_candidates_weight::(&candidates), max_consumable_by_candidates, ); - log::debug!(target: LOG_TARGET, "Indices Candidates: {:?}, size: {}", indices, candidates.len()); - candidates.indexed_retain(|idx, _backed_candidate| indices.binary_search(&idx).is_ok()); + log::debug!(target: LOG_TARGET, "Indices Candidates: {:?}, size: {}", chained_indices, candidates.len()); + chained_candidates + .indexed_retain(|idx, _backed_candidates| chained_indices.binary_search(&idx).is_ok()); // pick all bitfields, and // fill the remaining space with candidates let total_consumed = acc_candidate_weight.saturating_add(total_bitfields_weight); + *candidates = chained_candidates.into_iter().flatten().collect::>(); + return total_consumed } @@ -916,16 +989,22 @@ pub(crate) fn sanitize_bitfields( // Result from `sanitize_backed_candidates` #[derive(Debug, PartialEq)] struct SanitizedBackedCandidates { - // Sanitized backed candidates. The `Vec` is sorted according to the occupied core index. - backed_candidates: Vec>, + // Sanitized backed candidates along with the assigned core. The `Vec` is sorted according to + // the occupied core index. + backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, // Set to true if any votes from disabled validators were dropped from the input. votes_from_disabled_were_dropped: bool, + // Set to true if any candidates were dropped due to filtering done in + // `map_candidates_to_cores` + dropped_unscheduled_candidates: bool, } /// Filter out: /// 1. any candidates that have a concluded invalid dispute -/// 2. all backing votes from disabled validators -/// 3. any candidates that end up with less than `effective_minimum_backing_votes` backing votes +/// 2. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned +/// but have no injected core index. +/// 3. all backing votes from disabled validators +/// 4. any candidates that end up with less than `effective_minimum_backing_votes` backing votes /// /// `scheduled` follows the same naming scheme as provided in the /// guide: Currently `free` but might become `occupied`. @@ -944,7 +1023,8 @@ fn sanitize_backed_candidates< mut backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, - scheduled: &BTreeMap, + scheduled: BTreeMap>, + core_index_enabled: bool, ) -> SanitizedBackedCandidates { // Remove any candidates that were concluded invalid. // This does not assume sorting. @@ -952,22 +1032,23 @@ fn sanitize_backed_candidates< !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) }); - // Assure the backed candidate's `ParaId`'s core is free. - // This holds under the assumption that `Scheduler::schedule` is called _before_. - // We don't check the relay-parent because this is done in the closure when - // constructing the inherent and during actual processing otherwise. - - backed_candidates.retain(|backed_candidate| { - let desc = backed_candidate.descriptor(); + let initial_candidate_count = backed_candidates.len(); + // Map candidates to scheduled cores. 
Filter out any unscheduled candidates. + let mut backed_candidates_with_core = map_candidates_to_cores::( + &allowed_relay_parents, + scheduled, + core_index_enabled, + backed_candidates, + ); - scheduled.get(&desc.para_id).is_some() - }); + let dropped_unscheduled_candidates = + initial_candidate_count != backed_candidates_with_core.len(); // Filter out backing statements from disabled validators - let dropped_disabled = filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( + &mut backed_candidates_with_core, &allowed_relay_parents, - scheduled, + core_index_enabled, ); // Sort the `Vec` last, once there is a guarantee that these @@ -975,14 +1056,12 @@ fn sanitize_backed_candidates< // but more importantly are scheduled for a free core. // This both avoids extra work for obviously invalid candidates, // but also allows this to be done in place. - backed_candidates.sort_by(|x, y| { - // Never panics, since we filtered all panic arguments out in the previous `fn retain`. - scheduled[&x.descriptor().para_id].cmp(&scheduled[&y.descriptor().para_id]) - }); + backed_candidates_with_core.sort_by(|(_x, core_x), (_y, core_y)| core_x.cmp(&core_y)); SanitizedBackedCandidates { - backed_candidates, - votes_from_disabled_were_dropped: dropped_disabled, + dropped_unscheduled_candidates, + votes_from_disabled_were_dropped, + backed_candidates_with_core, } } @@ -1071,9 +1150,12 @@ fn limit_and_sanitize_disputes< // few more sanity checks. Returns `true` if at least one statement is removed and `false` // otherwise. fn filter_backed_statements_from_disabled_validators( - backed_candidates: &mut Vec::Hash>>, + backed_candidates_with_core: &mut Vec<( + BackedCandidate<::Hash>, + CoreIndex, + )>, allowed_relay_parents: &AllowedRelayParentsTracker>, - scheduled: &BTreeMap, + core_index_enabled: bool, ) -> bool { let disabled_validators = BTreeSet::<_>::from_iter(shared::Pallet::::disabled_validators().into_iter()); @@ -1083,7 +1165,7 @@ fn filter_backed_statements_from_disabled_validators *core_idx, - None => { - log::debug!(target: LOG_TARGET, "Can't get core idx of a backed candidate for para id {:?}. Dropping the candidate.", bc.descriptor().para_id); - return false - } - }; + backed_candidates_with_core.retain_mut(|(bc, core_idx)| { + let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); + let mut validator_indices = BitVec::<_>::from(validator_indices); // Get relay parent block number of the candidate. We need this to get the group index assigned to this core at this block number let relay_parent_block_number = match allowed_relay_parents @@ -1116,7 +1192,7 @@ fn filter_backed_statements_from_disabled_validators>::group_assigned_to_core( - core_idx, + *core_idx, relay_parent_block_number + One::one(), ) { Some(group_idx) => group_idx, @@ -1138,12 +1214,15 @@ fn filter_backed_statements_from_disabled_validators::from_iter(validator_group.iter().map(|idx| disabled_validators.contains(idx))); // The indices of statements from disabled validators in `BackedCandidate`. We have to drop these. 
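The vote-dropping that follows is plain bitmask arithmetic. A toy rendering with `bool` slices standing in for `BitVec`, mirroring `indices_to_drop = disabled_indices & validator_indices` and `validator_indices &= !disabled_indices`:

```rust
fn main() {
    // One bit per validator in the backing group.
    let validator_indices = [true, true]; // both group members voted
    let disabled = [true, false]; // validator 0 is disabled

    // indices_to_drop = disabled & validator_indices
    let indices_to_drop: Vec<bool> =
        disabled.iter().zip(&validator_indices).map(|(d, v)| d & v).collect();
    // validator_indices &= !disabled
    let kept: Vec<bool> =
        validator_indices.iter().zip(&disabled).map(|(v, d)| v & !d).collect();

    assert_eq!(indices_to_drop, vec![true, false]); // vote 0 is dropped
    assert_eq!(kept, vec![false, true]); // vote 1 is retained
}
```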
- let indices_to_drop = disabled_indices.clone() & &bc.validator_indices; + let indices_to_drop = disabled_indices.clone() & &validator_indices; // Apply the bitmask to drop the disabled validator from `validator_indices` - bc.validator_indices &= !disabled_indices; + validator_indices &= !disabled_indices; + // Update the backed candidate + bc.set_validator_indices_and_core_index(validator_indices, maybe_core_index); + // Remove the corresponding votes from `validity_votes` for idx in indices_to_drop.iter_ones().rev() { - bc.validity_votes.remove(idx); + bc.validity_votes_mut().remove(idx); } // If at least one statement was dropped we need to return `true` @@ -1154,10 +1233,9 @@ fn filter_backed_statements_from_disabled_validators( + allowed_relay_parents: &AllowedRelayParentsTracker>, + mut scheduled: BTreeMap>, + core_index_enabled: bool, + candidates: Vec>, +) -> Vec<(BackedCandidate, CoreIndex)> { + let mut backed_candidates_with_core = Vec::with_capacity(candidates.len()); + + // We keep a candidate if the parachain has only one core assigned or if + // a core index is provided by block author and it's indeed scheduled. + for backed_candidate in candidates { + let maybe_injected_core_index = get_injected_core_index::( + allowed_relay_parents, + &backed_candidate, + core_index_enabled, + ); + + let scheduled_cores = scheduled.get_mut(&backed_candidate.descriptor().para_id); + // Candidates without scheduled cores are silently filtered out. + if let Some(scheduled_cores) = scheduled_cores { + if let Some(core_idx) = maybe_injected_core_index { + if scheduled_cores.contains(&core_idx) { + scheduled_cores.remove(&core_idx); + backed_candidates_with_core.push((backed_candidate, core_idx)); + } + } else if scheduled_cores.len() == 1 { + backed_candidates_with_core + .push((backed_candidate, scheduled_cores.pop_first().expect("Length is 1"))); + } + } + } + + backed_candidates_with_core +} + +fn get_injected_core_index( + allowed_relay_parents: &AllowedRelayParentsTracker>, + candidate: &BackedCandidate, + core_index_enabled: bool, +) -> Option { + // After stripping the 8 bit extensions, the `validator_indices` field length is expected + // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, + // or not supported. + let (validator_indices, maybe_core_idx) = + candidate.validator_indices_and_core_index(core_index_enabled); + + let Some(core_idx) = maybe_core_idx else { return None }; + + let relay_parent_block_number = + match allowed_relay_parents.acquire_info(candidate.descriptor().relay_parent, None) { + Some((_, block_num)) => block_num, + None => { + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate {:?} is not in the allowed relay parents. Dropping the candidate.", + candidate.descriptor().relay_parent, + candidate.candidate().hash(), + ); + return None + }, + }; + + // Get the backing group of the candidate backed at `core_idx`. + let group_idx = match >::group_assigned_to_core( + core_idx, + relay_parent_block_number + One::one(), + ) { + Some(group_idx) => group_idx, + None => { + log::debug!( + target: LOG_TARGET, + "Can't get the group index for core idx {:?}. 
Dropping the candidate {:?}.", + core_idx, + candidate.candidate().hash(), + ); + return None + }, + }; + + let group_validators = match >::group_validators(group_idx) { + Some(validators) => validators, + None => return None, + }; + + if group_validators.len() == validator_indices.len() { + Some(core_idx) + } else { + None + } } diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 6f3eac35685a82b32c69b27e5379620988c956b7..fb4d1bd2226ed77c406a062c78a1ca26c4958e40 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -22,15 +22,19 @@ use super::*; #[cfg(not(feature = "runtime-benchmarks"))] mod enter { - use super::*; + use super::{inclusion::tests::TestCandidateBuilder, *}; use crate::{ builder::{Bench, BenchBuilder}, mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, - scheduler::common::Assignment, + scheduler::{ + common::{Assignment, AssignmentProvider}, + ParasEntry, + }, }; use assert_matches::assert_matches; use frame_support::assert_ok; use frame_system::limits; + use primitives::vstaging::SchedulerParams; use sp_runtime::Perbill; use sp_std::collections::btree_map::BTreeMap; @@ -84,7 +88,7 @@ mod enter { // `create_inherent` and will not cause `enter` to early. fn include_backed_candidates() { let config = MockGenesisConfig::default(); - assert!(config.configuration.config.scheduling_lookahead > 0); + assert!(config.configuration.config.scheduler_params.lookahead > 0); new_test_ext(config).execute_with(|| { let dispute_statements = BTreeMap::new(); @@ -622,7 +626,7 @@ mod enter { #[test] fn limit_candidates_over_weight_1() { let config = MockGenesisConfig::default(); - assert!(config.configuration.config.scheduling_lookahead > 0); + assert!(config.configuration.config.scheduler_params.lookahead > 0); new_test_ext(config).execute_with(|| { // Create the inherent data for this block @@ -697,6 +701,25 @@ mod enter { 2 ); + // One core was scheduled. We should put the assignment back, before calling enter(). + let now = >::block_number() + 1; + let used_cores = 5; + let cores = (0..used_cores) + .into_iter() + .map(|i| { + let SchedulerParams { ttl, .. 
} = + >::config().scheduler_params; + // Load an assignment into provider so that one is present to pop + let assignment = + ::AssignmentProvider::get_mock_assignment( + CoreIndex(i), + ParaId::from(i), + ); + (CoreIndex(i), [ParasEntry::new(assignment, now + ttl)].into()) + }) + .collect(); + scheduler::ClaimQueue::::set(cores); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -902,6 +925,129 @@ mod enter { }); } + // Helper fn that builds chained dummy candidates for elastic scaling tests + fn build_backed_candidate_chain( + para_id: ParaId, + len: usize, + start_core_index: usize, + code_upgrade_index: Option, + ) -> Vec { + if let Some(code_upgrade_index) = code_upgrade_index { + assert!(code_upgrade_index < len, "Code upgrade index out of bounds"); + } + + (0..len) + .into_iter() + .map(|idx| { + let mut builder = TestCandidateBuilder::default(); + builder.para_id = para_id; + let mut ccr = builder.build(); + + if Some(idx) == code_upgrade_index { + ccr.commitments.new_validation_code = Some(vec![1, 2, 3, 4].into()); + } + + ccr.commitments.processed_downward_messages = idx as u32; + let core_index = start_core_index + idx; + + BackedCandidate::new( + ccr.into(), + Default::default(), + Default::default(), + Some(CoreIndex(core_index as u32)), + ) + }) + .collect::>() + } + + // Ensure that `apply_weight_limit` treats a chain of candidates for the same para as a + // unit: the whole chain is either kept or dropped, depending on the remaining block weight. + #[test] + fn test_backed_candidates_apply_weight_works_for_elastic_scaling() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let seed = [ + 1, 0, 52, 0, 0, 0, 0, 0, 1, 0, 10, 0, 22, 32, 0, 0, 2, 0, 55, 49, 0, 11, 0, 0, 3, + 0, 0, 0, 0, 0, 2, 92, + ]; + let mut rng = rand_chacha::ChaChaRng::from_seed(seed); + + // Create an overweight inherent and oversized block + let mut backed_and_concluding = BTreeMap::new(); + + for i in 0..30 { + backed_and_concluding.insert(i, i); + } + + let scenario = make_inherent_data(TestConfig { + dispute_statements: Default::default(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 5, + code_upgrade: None, + fill_claimqueue: false, + }); + + let mut para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 0 disputes + // => 5*30 = 150) + assert_eq!(para_inherent_data.bitfields.len(), 150); + // * 30 backed candidates + assert_eq!(para_inherent_data.backed_candidates.len(), 30); + + let mut input_candidates = + build_backed_candidate_chain(ParaId::from(1000), 3, 0, Some(1)); + let chained_candidates_weight = backed_candidates_weight::(&input_candidates); + + input_candidates.append(&mut para_inherent_data.backed_candidates); + let input_bitfields = para_inherent_data.bitfields; + + // Test if the weight is insufficient even for 1 candidate (which doesn't contain a code + // upgrade). 
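In miniature, the property the assertions below establish: a chain is included only if its total weight fits, otherwise it is skipped whole. The real selection in `apply_weight_limit` is randomized with preferred indices; this greedy sketch only illustrates the atomicity.

```rust
/// Greedy toy: pick whole chains while their *total* weight fits the budget.
fn pick_chains(chains: &[Vec<u64>], mut budget: u64) -> Vec<usize> {
    let mut picked = Vec::new();
    for (idx, chain) in chains.iter().enumerate() {
        let chain_weight: u64 = chain.iter().sum();
        if chain_weight <= budget {
            budget -= chain_weight;
            picked.push(idx);
        }
    }
    picked
}

fn main() {
    // A 3-candidate chain of total weight 9 and a lone candidate of weight 4.
    let chains = vec![vec![3, 3, 3], vec![4]];
    // Budget 5: the chain is skipped whole, even though its first candidate
    // alone (weight 3) would fit. Only the lone candidate is picked.
    assert_eq!(pick_chains(&chains, 5), vec![1]);
    // Budget 13: everything fits.
    assert_eq!(pick_chains(&chains, 13), vec![0, 1]);
}
```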
+ let max_weight = backed_candidate_weight::(&input_candidates[0]) + + signed_bitfields_weight::(&input_bitfields); + let mut backed_candidates = input_candidates.clone(); + let mut bitfields = input_bitfields.clone(); + apply_weight_limit::( + &mut backed_candidates, + &mut bitfields, + max_weight, + &mut rng, + ); + + // The chained candidates are not picked, instead a single other candidate is picked + assert_eq!(backed_candidates.len(), 1); + assert_ne!(backed_candidates[0].descriptor().para_id, ParaId::from(1000)); + + // All bitfields are kept. + assert_eq!(bitfields.len(), 150); + + // Test if para_id 1000 chained candidates make it if there is enough room for its 3 + // candidates. + let max_weight = + chained_candidates_weight + signed_bitfields_weight::(&input_bitfields); + let mut backed_candidates = input_candidates.clone(); + let mut bitfields = input_bitfields.clone(); + apply_weight_limit::( + &mut backed_candidates, + &mut bitfields, + max_weight, + &mut rng, + ); + + // Only the chained candidates should pass filter. + assert_eq!(backed_candidates.len(), 3); + // Check the actual candidates + assert_eq!(backed_candidates[0].descriptor().para_id, ParaId::from(1000)); + assert_eq!(backed_candidates[1].descriptor().para_id, ParaId::from(1000)); + assert_eq!(backed_candidates[2].descriptor().para_id, ParaId::from(1000)); + + // All bitfields are kept. + assert_eq!(bitfields.len(), 150); + }); + } + // Ensure that overweight parachain inherents are always rejected by the runtime. // Runtime should panic and return `InherentOverweight` error. #[test] @@ -980,6 +1126,7 @@ mod sanitizers { AvailabilityBitfield, GroupIndex, Hash, Id as ParaId, SignedAvailabilityBitfield, ValidatorIndex, }; + use rstest::rstest; use sp_core::crypto::UncheckedFrom; use crate::mock::Test; @@ -1238,12 +1385,13 @@ mod sanitizers { // Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing struct TestData { backed_candidates: Vec, - scheduled_paras: BTreeMap, + all_backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, + scheduled_paras: BTreeMap>, } // Generate test data for the candidates and assert that the evnironment is set as expected // (check the comments for details) - fn get_test_data() -> TestData { + fn get_test_data(core_index_enabled: bool) -> TestData { const RELAY_PARENT_NUM: u32 = 3; // Add the relay parent to `shared` pallet. Otherwise some code (e.g. 
filtering backing @@ -1285,9 +1433,14 @@ mod sanitizers { shared::Pallet::::set_active_validators_ascending(validator_ids); // Two scheduled parachains - ParaId(1) on CoreIndex(0) and ParaId(2) on CoreIndex(1) - let scheduled = (0_usize..2) + let scheduled: BTreeMap> = (0_usize..2) .into_iter() - .map(|idx| (ParaId::from(1_u32 + idx as u32), CoreIndex::from(idx as u32))) + .map(|idx| { + ( + ParaId::from(1_u32 + idx as u32), + [CoreIndex::from(idx as u32)].into_iter().collect(), + ) + }) .collect::>(); // Set the validator groups in `scheduler` @@ -1301,7 +1454,7 @@ mod sanitizers { ( CoreIndex::from(0), VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, RELAY_PARENT_NUM, )]), ), @@ -1319,12 +1472,12 @@ mod sanitizers { match group_index { group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), - _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + _ => panic!("Group index out of bounds"), } .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; - // Two backed candidates from each parachain + // One backed candidate from each parachain let backed_candidates = (0_usize..2) .into_iter() .map(|idx0| { @@ -1348,6 +1501,7 @@ mod sanitizers { &keystore, &signing_context, BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(idx0 as u32)), ); backed }) @@ -1369,13 +1523,373 @@ mod sanitizers { ] ); - TestData { backed_candidates, scheduled_paras: scheduled } + let all_backed_candidates_with_core = backed_candidates + .iter() + .map(|candidate| { + // Only one entry for this test data. + ( + candidate.clone(), + scheduled + .get(&candidate.descriptor().para_id) + .unwrap() + .first() + .copied() + .unwrap(), + ) + }) + .collect(); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + all_backed_candidates_with_core, + } + } + + // Generate test data for the candidates and assert that the environment is set as expected + // (check the comments for details) + // Para 1 scheduled on core 0 and core 1. Two candidates are supplied. + // Para 2 scheduled on cores 2 and 3. One candidate supplied. + // Para 3 scheduled on core 4. One candidate supplied. + // Para 4 scheduled on core 5. Two candidates supplied. + // Para 5 scheduled on core 6. No candidates supplied. + fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData { + const RELAY_PARENT_NUM: u32 = 3; + + // Add the relay parent to `shared` pallet. Otherwise some code (e.g.
filtering backing + // votes) won't behave correctly + shared::Pallet::::add_allowed_relay_parent( + default_header().hash(), + Default::default(), + RELAY_PARENT_NUM, + 1, + ); + + let header = default_header(); + let relay_parent = header.hash(); + let session_index = SessionIndex::from(0_u32); + + let keystore = LocalKeystore::in_memory(); + let keystore = Arc::new(keystore) as KeystorePtr; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + keyring::Sr25519Keyring::Eve, + keyring::Sr25519Keyring::Ferdie, + keyring::Sr25519Keyring::One, + ]; + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + + // Set active validators in `shared` pallet + let validator_ids = + validators.iter().map(|v| v.public().into()).collect::>(); + shared::Pallet::::set_active_validators_ascending(validator_ids); + + // Set the validator groups in `scheduler` + scheduler::Pallet::::set_validator_groups(vec![ + vec![ValidatorIndex(0)], + vec![ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3)], + vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], + vec![ValidatorIndex(6)], + ]); + + // Update scheduler's claimqueue with the parachains + scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + ( + CoreIndex::from(0), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(1), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(2), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(3), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(4), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(4) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(5), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(5) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(6), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 5.into(), core_index: CoreIndex(6) }, + RELAY_PARENT_NUM, + )]), + ), + ])); + + // Callback used for backing candidates + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0]), + group_index if group_index == GroupIndex::from(1) => Some(vec![1]), + group_index if group_index == GroupIndex::from(2) => Some(vec![2]), + group_index if group_index == GroupIndex::from(3) => Some(vec![3]), + group_index if group_index == GroupIndex::from(4) => Some(vec![4]), + group_index if group_index == GroupIndex::from(5) => Some(vec![5]), + group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + + _ => panic!("Group index out of bounds"), + } + .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) + }; + + let mut backed_candidates = vec![]; + let mut all_backed_candidates_with_core = vec![]; + + // Para 1 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), 
+ relay_parent, + pov_hash: Hash::repeat_byte(1 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(0 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(0))); + } + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(2 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(1 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(1))); + } + } + + // Para 2 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(2 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(2 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(2))); + } + } + + // Para 3 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(4 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(4 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(4 as u32)), + ); + backed_candidates.push(backed.clone()); + all_backed_candidates_with_core.push((backed, CoreIndex(4))); + } + + // Para 4 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(5 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + backed_candidates.push(backed.clone()); + all_backed_candidates_with_core.push((backed, CoreIndex(5))); + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + 
persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(5 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // No candidate for para 5. + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(3)), + (CoreIndex(5), ParaId::from(4)), + (CoreIndex(6), ParaId::from(5)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( + shared::Pallet::::active_validator_indices(), + vec![ + ValidatorIndex(0), + ValidatorIndex(1), + ValidatorIndex(2), + ValidatorIndex(3), + ValidatorIndex(4), + ValidatorIndex(5), + ValidatorIndex(6), + ] + ); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + all_backed_candidates_with_core, + } } - #[test] - fn happy_path() { + #[rstest] + #[case(false)] + #[case(true)] + fn happy_path(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, scheduled_paras: scheduled } = get_test_data(); + let TestData { + backed_candidates, + all_backed_candidates_with_core, + scheduled_paras: scheduled, + } = get_test_data(core_index_enabled); let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; @@ -1385,47 +1899,95 @@ mod sanitizers { backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled + scheduled, + core_index_enabled ), SanitizedBackedCandidates { - backed_candidates, - votes_from_disabled_were_dropped: false + backed_candidates_with_core: all_backed_candidates_with_core, + votes_from_disabled_were_dropped: false, + dropped_unscheduled_candidates: false } ); + }); + } - {} + #[rstest] + #[case(false)] + #[case(true)] + fn test_with_multiple_cores_per_para(#[case] core_index_enabled: bool) { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let TestData { + backed_candidates, + all_backed_candidates_with_core: expected_all_backed_candidates_with_core, + scheduled_paras: scheduled, + } = get_test_data_multiple_cores_per_para(core_index_enabled); + + let has_concluded_invalid = + |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + + assert_eq!( + sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + has_concluded_invalid, + scheduled, + core_index_enabled + ), + SanitizedBackedCandidates { + backed_candidates_with_core: expected_all_backed_candidates_with_core, + votes_from_disabled_were_dropped: false, + dropped_unscheduled_candidates: true + } + ); }); } // nothing is scheduled, so no paraids match, thus all backed candidates are skipped - #[test] - fn nothing_scheduled() { + #[rstest] + #[case(false, false)] + #[case(true, true)] + #[case(false, true)] + #[case(true, false)] + fn nothing_scheduled( + #[case] core_index_enabled: bool, + #[case] multiple_cores_per_para: bool, + ) { 
new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, scheduled_paras: _ } = get_test_data(); - let scheduled = &BTreeMap::new(); + let TestData { backed_candidates, .. } = if multiple_cores_per_para { + get_test_data_multiple_cores_per_para(core_index_enabled) + } else { + get_test_data(core_index_enabled) + }; + let scheduled = BTreeMap::new(); let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; let SanitizedBackedCandidates { - backed_candidates: sanitized_backed_candidates, + backed_candidates_with_core: sanitized_backed_candidates, votes_from_disabled_were_dropped, + dropped_unscheduled_candidates, } = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled, + scheduled, + core_index_enabled, ); assert!(sanitized_backed_candidates.is_empty()); assert!(!votes_from_disabled_were_dropped); + assert!(dropped_unscheduled_candidates); }); } // candidates that have concluded as invalid are filtered out - #[test] - fn invalid_are_filtered_out() { + #[rstest] + #[case(false)] + #[case(true)] + fn invalid_are_filtered_out(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, scheduled_paras: scheduled } = get_test_data(); + let TestData { backed_candidates, scheduled_paras: scheduled, .. } = + get_test_data(core_index_enabled); // mark every second one as concluded invalid let set = { @@ -1440,45 +2002,55 @@ mod sanitizers { let has_concluded_invalid = |_idx: usize, candidate: &BackedCandidate| set.contains(&candidate.hash()); let SanitizedBackedCandidates { - backed_candidates: sanitized_backed_candidates, + backed_candidates_with_core: sanitized_backed_candidates, votes_from_disabled_were_dropped, + dropped_unscheduled_candidates, } = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled, + scheduled, + core_index_enabled, ); assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); assert!(!votes_from_disabled_were_dropped); + assert!(!dropped_unscheduled_candidates); }); } - #[test] - fn disabled_non_signing_validator_doesnt_get_filtered() { + #[rstest] + #[case(false)] + #[case(true)] + fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. 
} = + get_test_data(core_index_enabled); // Disable Eve set_disabled_validators(vec![4]); - let before = backed_candidates.clone(); + let before = all_backed_candidates_with_core.clone(); // Eve is disabled but no backing statement is signed by it so nothing should be // filtered assert!(!filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras + core_index_enabled )); - assert_eq!(backed_candidates, before); + assert_eq!(all_backed_candidates_with_core, before); }); } - - #[test] - fn drop_statements_from_disabled_without_dropping_candidate() { + #[rstest] + #[case(false)] + #[case(true)] + fn drop_statements_from_disabled_without_dropping_candidate( + #[case] core_index_enabled: bool, + ) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. } = + get_test_data(core_index_enabled); // Disable Alice set_disabled_validators(vec![0]); @@ -1491,61 +2063,83 @@ mod sanitizers { configuration::Pallet::::force_set_active_config(hc); // Verify the initial state is as expected - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 2); - assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(0).unwrap(), - true - ); assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(1).unwrap(), - true + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 2 ); - let untouched = backed_candidates.get(1).unwrap().clone(); + let (validator_indices, maybe_core_index) = all_backed_candidates_with_core + .get(0) + .unwrap() + .0 + .validator_indices_and_core_index(core_index_enabled); + if core_index_enabled { + assert!(maybe_core_index.is_some()); + } else { + assert!(maybe_core_index.is_none()); + } + + assert_eq!(validator_indices.get(0).unwrap(), true); + assert_eq!(validator_indices.get(1).unwrap(), true); + let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); assert!(filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras + core_index_enabled )); + let (validator_indices, maybe_core_index) = all_backed_candidates_with_core + .get(0) + .unwrap() + .0 + .validator_indices_and_core_index(core_index_enabled); + if core_index_enabled { + assert!(maybe_core_index.is_some()); + } else { + assert!(maybe_core_index.is_none()); + } + // there should still be two backed candidates - assert_eq!(backed_candidates.len(), 2); + assert_eq!(all_backed_candidates_with_core.len(), 2); // but the first one should have only one validity vote - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 1); - // Validator 0 vote should be dropped, validator 1 - retained - assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(0).unwrap(), - false - ); assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(1).unwrap(), - true + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 1 ); + // Validator 0 vote should be dropped, validator 1 - retained + assert_eq!(validator_indices.get(0).unwrap(), false); + assert_eq!(validator_indices.get(1).unwrap(), true); // the second candidate shouldn't be modified - assert_eq!(*backed_candidates.get(1).unwrap(), untouched); + assert_eq!(all_backed_candidates_with_core.get(1).unwrap().0, 
untouched); }); } - #[test] - fn drop_candidate_if_all_statements_are_from_disabled() { + #[rstest] + #[case(false)] + #[case(true)] + fn drop_candidate_if_all_statements_are_from_disabled(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. } = + get_test_data(core_index_enabled); // Disable Alice and Bob set_disabled_validators(vec![0, 1]); // Verify the initial state is as expected - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 2); - let untouched = backed_candidates.get(1).unwrap().clone(); + assert_eq!( + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 2 + ); + let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); assert!(filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras + core_index_enabled )); - assert_eq!(backed_candidates.len(), 1); - assert_eq!(*backed_candidates.get(0).unwrap(), untouched); + assert_eq!(all_backed_candidates_with_core.len(), 1); + assert_eq!(all_backed_candidates_with_core.get(0).unwrap().0, untouched); }); } } diff --git a/polkadot/runtime/parachains/src/paras_inherent/weights.rs b/polkadot/runtime/parachains/src/paras_inherent/weights.rs index 05cc53fae04652c188d62b57fa5281aa6bb88e22..0f4e5be572a66d16c1476ada7671c495a27bbc83 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/weights.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/weights.rs @@ -149,11 +149,11 @@ pub fn backed_candidate_weight( candidate: &BackedCandidate, ) -> Weight { set_proof_size_to_tx_size( - if candidate.candidate.commitments.new_validation_code.is_some() { + if candidate.candidate().commitments.new_validation_code.is_some() { <::WeightInfo as WeightInfo>::enter_backed_candidate_code_upgrade() } else { <::WeightInfo as WeightInfo>::enter_backed_candidates_variable( - candidate.validity_votes.len() as u32, + candidate.validity_votes().len() as u32, ) }, candidate, diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 3dbb050fb4e5e40dc6f7b512bf55c45368a3ac5d..f231864a85e7261d999c1c1ec34679bb0825d56c 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -37,7 +37,7 @@ //! availability cores over time. use crate::{configuration, initializer::SessionChangeNotification, paras}; -use frame_support::pallet_prelude::*; +use frame_support::{pallet_prelude::*, traits::Defensive}; use frame_system::pallet_prelude::BlockNumberFor; pub use polkadot_core_primitives::v2::BlockNumber; use primitives::{ @@ -51,7 +51,7 @@ use sp_std::{ pub mod common; -use common::{Assignment, AssignmentProvider, AssignmentProviderConfig}; +use common::{Assignment, AssignmentProvider}; pub use pallet::*; @@ -59,6 +59,7 @@ pub use pallet::*; mod tests; const LOG_TARGET: &str = "runtime::parachains::scheduler"; + pub mod migration; #[frame_support::pallet] @@ -88,10 +89,8 @@ pub mod pallet { #[pallet::getter(fn validator_groups)] pub(crate) type ValidatorGroups = StorageValue<_, Vec>, ValueQuery>; - /// One entry for each availability core. Entries are `None` if the core is not currently - /// occupied. Can be temporarily `Some` if scheduled but not occupied. 
- /// The i'th parachain belongs to the i'th core, with the remaining cores all being - /// parathread-multiplexers. + /// One entry for each availability core. The i'th parachain belongs to the i'th core, with the + /// remaining cores all being on demand parachain multiplexers. /// /// Bounded by the maximum of either of these two values: /// * The number of parachains and parathread multiplexers @@ -146,7 +145,7 @@ pub mod pallet { /// One entry for each availability core. The `VecDeque` represents the assignments to be /// scheduled on that core. The value contained here will not be valid after the end of - /// a block. Runtime APIs should be used to determine scheduled cores/ for the upcoming block. + /// a block. Runtime APIs should be used to determine scheduled cores for the upcoming block. #[pallet::storage] #[pallet::getter(fn claimqueue)] pub(crate) type ClaimQueue = @@ -222,7 +221,7 @@ impl Pallet { let n_cores = core::cmp::max( T::AssignmentProvider::session_core_count(), - match config.max_validators_per_core { + match config.scheduler_params.max_validators_per_core { Some(x) if x != 0 => validators.len() as u32 / x, _ => 0, }, @@ -236,8 +235,16 @@ impl Pallet { if n_cores == 0 || validators.is_empty() { ValidatorGroups::::set(Vec::new()); } else { - let group_base_size = validators.len() / n_cores as usize; - let n_larger_groups = validators.len() % n_cores as usize; + let group_base_size = validators + .len() + .checked_div(n_cores as usize) + .defensive_proof("n_cores should not be 0") + .unwrap_or(0); + let n_larger_groups = validators + .len() + .checked_rem(n_cores as usize) + .defensive_proof("n_cores should not be 0") + .unwrap_or(0); // Groups contain indices into the validators from the session change notification, // which are already shuffled. @@ -350,6 +357,7 @@ impl Pallet { fn drop_expired_claims_from_claimqueue() { let now = >::block_number(); let availability_cores = AvailabilityCores::::get(); + let ttl = >::config().scheduler_params.ttl; ClaimQueue::::mutate(|cq| { for (idx, _) in (0u32..).zip(availability_cores) { @@ -382,8 +390,6 @@ impl Pallet { if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { - let AssignmentProviderConfig { ttl, .. } = - T::AssignmentProvider::get_provider_config(core_idx); core_claimqueue.push_back(ParasEntry::new(assignment, now + ttl)); } } @@ -428,7 +434,7 @@ impl Pallet { } let rotations_since_session_start: BlockNumberFor = - (at - session_start_block) / config.group_rotation_frequency; + (at - session_start_block) / config.scheduler_params.group_rotation_frequency; let rotations_since_session_start = as TryInto>::try_into(rotations_since_session_start) @@ -460,9 +466,9 @@ impl Pallet { // Note: blocks backed in this rotation will never time out here as backed_in + // config.paras_availability_period will always be > now for these blocks, as // otherwise above condition would not be true. 
- pending_since + config.paras_availability_period + pending_since + config.scheduler_params.paras_availability_period } else { - next_rotation + config.paras_availability_period + next_rotation + config.scheduler_params.paras_availability_period }; AvailabilityTimeoutStatus { timed_out: time_out_at <= now, live_until: time_out_at } @@ -478,7 +484,8 @@ impl Pallet { let now = >::block_number() + One::one(); let rotation_info = Self::group_rotation_info(now); - let current_window = rotation_info.last_rotation_at() + config.paras_availability_period; + let current_window = + rotation_info.last_rotation_at() + config.scheduler_params.paras_availability_period; now < current_window } @@ -488,7 +495,7 @@ impl Pallet { ) -> GroupRotationInfo> { let session_start_block = Self::session_start_block(); let group_rotation_frequency = - >::config().group_rotation_frequency; + >::config().scheduler_params.group_rotation_frequency; GroupRotationInfo { session_start_block, now, group_rotation_frequency } } @@ -508,6 +515,8 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it times out. pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { + let max_availability_timeouts = + >::config().scheduler_params.max_availability_timeouts; Self::next_up_on_available(core).or_else(|| { // Or, if none, the claim currently occupying the core, // as it would be put back on the queue after timing out if number of retries is not at @@ -515,16 +524,12 @@ impl Pallet { let cores = AvailabilityCores::::get(); cores.get(core.0 as usize).and_then(|c| match c { CoreOccupied::Free => None, - CoreOccupied::Paras(pe) => { - let AssignmentProviderConfig { max_availability_timeouts, .. } = - T::AssignmentProvider::get_provider_config(core); - + CoreOccupied::Paras(pe) => if pe.availability_timeouts < max_availability_timeouts { Some(Self::paras_entry_to_scheduled_core(pe)) } else { None - } - }, + }, }) }) } @@ -566,7 +571,7 @@ impl Pallet { // ClaimQueue related functions // fn claimqueue_lookahead() -> u32 { - >::config().scheduling_lookahead + >::config().scheduler_params.lookahead } /// Frees cores and fills the free claimqueue spots by popping from the `AssignmentProvider`. @@ -585,15 +590,15 @@ impl Pallet { let n_lookahead = Self::claimqueue_lookahead().max(1); let n_session_cores = T::AssignmentProvider::session_core_count(); let cq = ClaimQueue::::get(); - let ttl = >::config().on_demand_ttl; + let config = >::config(); + let max_availability_timeouts = config.scheduler_params.max_availability_timeouts; + let ttl = config.scheduler_params.ttl; for core_idx in 0..n_session_cores { let core_idx = CoreIndex::from(core_idx); // add previously timedout paras back into the queue if let Some(mut entry) = timedout_paras.remove(&core_idx) { - let AssignmentProviderConfig { max_availability_timeouts, .. } = - T::AssignmentProvider::get_provider_config(core_idx); if entry.availability_timeouts < max_availability_timeouts { // Increment the timeout counter. 
entry.availability_timeouts += 1; @@ -668,13 +673,6 @@ impl Pallet { .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.assignment.para_id()))) } - #[cfg(any(feature = "runtime-benchmarks", test))] - pub(crate) fn assignment_provider_config( - core_idx: CoreIndex, - ) -> AssignmentProviderConfig> { - T::AssignmentProvider::get_provider_config(core_idx) - } - #[cfg(any(feature = "try-runtime", test))] fn claimqueue_len() -> usize { ClaimQueue::::get().iter().map(|la_vec| la_vec.1.len()).sum() diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 2eb73385803c6e62a88fc55526e3ba18218cf06d..66a4e6d30be0830650295d5311b7d7e1fc3b1d20 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -48,22 +48,10 @@ impl Assignment { } } -#[derive(Encode, Decode, TypeInfo)] -/// A set of variables required by the scheduler in order to operate. -pub struct AssignmentProviderConfig { - /// How many times a collation can time out on availability. - /// Zero timeouts still means that a collation can be provided as per the slot auction - /// assignment provider. - pub max_availability_timeouts: u32, - - /// How long the collator has to provide a collation to the backing group before being dropped. - pub ttl: BlockNumber, -} - pub trait AssignmentProvider { /// Pops an [`Assignment`] from the provider for a specified [`CoreIndex`]. /// - /// This is where assignments come into existance. + /// This is where assignments come into existence. fn pop_assignment_for_core(core_idx: CoreIndex) -> Option; /// A previously popped `Assignment` has been fully processed. @@ -77,14 +65,11 @@ pub trait AssignmentProvider { /// Push back a previously popped assignment. /// /// If the assignment could not be processed within the current session, it can be pushed back - /// to the assignment provider in order to be poppped again later. + /// to the assignment provider in order to be popped again later. /// /// This is the second way the life of an assignment can come to an end. fn push_back_assignment(assignment: Assignment); - /// Returns a set of variables needed by the scheduler - fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig; - /// Push some assignment for mocking/benchmarks purposes. /// /// Useful for benchmarks and testing. The returned assignment is "valid" and can if need be diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 9af23ce64bd67ab0901dd1a03e849d51cfffe342..e6a1e66b3a531906fa6cbf763f248f6f6108a0f3 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -18,7 +18,9 @@ use super::*; use frame_support::assert_ok; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, SessionIndex, ValidationCode, ValidatorId}; +use primitives::{ + vstaging::SchedulerParams, BlockNumber, SessionIndex, ValidationCode, ValidatorId, +}; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ @@ -103,15 +105,19 @@ fn run_to_end_of_block( fn default_config() -> HostConfiguration { HostConfiguration { - coretime_cores: 3, - group_rotation_frequency: 10, - paras_availability_period: 3, - scheduling_lookahead: 2, // This field does not affect anything that scheduler does. However, `HostConfiguration` // is still a subject to consistency test. 
It requires that // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and // `thread_availability_period`. minimum_validation_upgrade_delay: 6, + scheduler_params: SchedulerParams { + group_rotation_frequency: 10, + paras_availability_period: 3, + lookahead: 2, + num_cores: 3, + max_availability_timeouts: 1, + ..Default::default() + }, ..Default::default() } } @@ -155,7 +161,7 @@ fn scheduled_entries() -> impl Iterator(vec![para_id])); // Add a claim on core 0 with a ttl == now (17) let paras_entry_non_expired = ParasEntry::new(Assignment::Bulk(para_id), now); @@ -217,7 +223,7 @@ fn claimqueue_ttl_drop_fn_works() { Scheduler::add_to_claimqueue(core_idx, paras_entry_expired.clone()); Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); let cq = Scheduler::claimqueue(); - assert!(cq.get(&core_idx).unwrap().len() == 3); + assert_eq!(cq.get(&core_idx).unwrap().len(), 3); // Add a claim to the test assignment provider. let assignment = Assignment::Bulk(para_id); @@ -229,8 +235,9 @@ fn claimqueue_ttl_drop_fn_works() { let cq = Scheduler::claimqueue(); let cqc = cq.get(&core_idx).unwrap(); - // Same number of claims - assert!(cqc.len() == 3); + // Same number of claims, because a new claim is popped from `MockAssigner` instead of the + // expired one + assert_eq!(cqc.len(), 3); // The first 2 claims in the queue should have a ttl of 17, // being the ones set up prior in this test as claims 1 and 3. @@ -290,7 +297,7 @@ fn session_change_shuffles_validators() { fn session_change_takes_only_max_per_core() { let config = { let mut config = default_config(); - config.max_validators_per_core = Some(1); + config.scheduler_params.max_validators_per_core = Some(1); config }; @@ -330,7 +337,8 @@ fn session_change_takes_only_max_per_core() { #[test] fn fill_claimqueue_fills() { - let genesis_config = genesis_config(&default_config()); + let config = default_config(); + let genesis_config = genesis_config(&config); let para_a = ParaId::from(3_u32); let para_b = ParaId::from(4_u32); @@ -342,22 +350,20 @@ fn fill_claimqueue_fills() { new_test_ext(genesis_config).execute_with(|| { MockAssigner::set_core_count(2); - let AssignmentProviderConfig { ttl: config_ttl, .. } = - MockAssigner::get_provider_config(CoreIndex(0)); + let coretime_ttl = config.scheduler_params.ttl; // Add 3 paras schedule_blank_para(para_a); schedule_blank_para(para_b); schedule_blank_para(para_c); - // start a new session to activate, 3 validators for 3 cores. + // start a new session to activate, 2 validators for 2 cores. 
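// `default_config()` above now routes every scheduling knob through
// `scheduler_params`. A sketch of `primitives::vstaging::SchedulerParams`,
// inferred only from the fields this diff reads and writes; the real struct
// may define further fields and different defaults.
pub struct SchedulerParams<BlockNumber> {
    pub group_rotation_frequency: BlockNumber,
    pub paras_availability_period: BlockNumber,
    pub max_validators_per_core: Option<u32>,
    pub lookahead: u32,
    pub num_cores: u32,
    pub max_availability_timeouts: u32,
    pub ttl: BlockNumber,
}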
run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: default_config(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), ], ..Default::default() }), @@ -381,7 +387,7 @@ fn fill_claimqueue_fills() { &ParasEntry { assignment: assignment_a.clone(), availability_timeouts: 0, - ttl: 2 + config_ttl + ttl: 2 + coretime_ttl }, ); // Sits on the same core as `para_a` @@ -390,7 +396,7 @@ fn fill_claimqueue_fills() { ParasEntry { assignment: assignment_b.clone(), availability_timeouts: 0, - ttl: 2 + config_ttl + ttl: 2 + coretime_ttl } ); assert_eq!( @@ -398,7 +404,7 @@ fn fill_claimqueue_fills() { &ParasEntry { assignment: assignment_c.clone(), availability_timeouts: 0, - ttl: 2 + config_ttl + ttl: 2 + coretime_ttl }, ); } @@ -410,7 +416,7 @@ fn schedule_schedules_including_just_freed() { let mut config = default_config(); // NOTE: This test expects on demand cores to each get slotted on to a different core // and not fill up the claimqueue of each core first. - config.scheduling_lookahead = 1; + config.scheduler_params.lookahead = 1; let genesis_config = genesis_config(&config); let para_a = ParaId::from(3_u32); @@ -479,7 +485,7 @@ fn schedule_schedules_including_just_freed() { // All `core_queue`s should be empty Scheduler::claimqueue() .iter() - .for_each(|(_core_idx, core_queue)| assert!(core_queue.len() == 0)) + .for_each(|(_core_idx, core_queue)| assert_eq!(core_queue.len(), 0)) } // add a couple more para claims - the claim on `b` will go to the 3rd core @@ -557,7 +563,7 @@ fn schedule_schedules_including_just_freed() { #[test] fn schedule_clears_availability_cores() { let mut config = default_config(); - config.scheduling_lookahead = 1; + config.scheduler_params.lookahead = 1; let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); @@ -659,11 +665,11 @@ fn schedule_clears_availability_cores() { fn schedule_rotates_groups() { let config = { let mut config = default_config(); - config.scheduling_lookahead = 1; + config.scheduler_params.lookahead = 1; config }; - let rotation_frequency = config.group_rotation_frequency; + let rotation_frequency = config.scheduler_params.group_rotation_frequency; let on_demand_cores = 2; let genesis_config = genesis_config(&config); @@ -742,9 +748,12 @@ fn schedule_rotates_groups() { #[test] fn on_demand_claims_are_pruned_after_timing_out() { - let max_retries = 20; + let max_timeouts = 20; let mut config = default_config(); - config.scheduling_lookahead = 1; + config.scheduler_params.lookahead = 1; + // Need more timeouts for this test + config.scheduler_params.max_availability_timeouts = max_timeouts; + config.scheduler_params.ttl = BlockNumber::from(5u32); let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); @@ -753,11 +762,6 @@ fn on_demand_claims_are_pruned_after_timing_out() { new_test_ext(genesis_config).execute_with(|| { MockAssigner::set_core_count(2); - // Need more timeouts for this test - MockAssigner::set_assignment_provider_config(AssignmentProviderConfig { - max_availability_timeouts: max_retries, - ttl: BlockNumber::from(5u32), - }); schedule_blank_para(para_a); // #1 @@ -794,7 +798,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { // Run to block #n over the max_retries value. // In this case, both validator groups with time out on availability and // the assignment will be dropped. 
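// The loop below exercises the retry rule applied by
// `free_cores_and_fill_claimqueue` (see the `availability_timeouts` increment
// earlier in this diff), reduced here to a self-contained sketch with a
// stand-in `Entry` type in place of `ParasEntry`:
struct Entry {
    availability_timeouts: u32,
}

fn requeue_or_drop(mut entry: Entry, max_availability_timeouts: u32) -> Option<Entry> {
    if entry.availability_timeouts < max_availability_timeouts {
        // Retries not exhausted: bump the counter and put the claim back.
        entry.availability_timeouts += 1;
        Some(entry)
    } else {
        // Out of retries: the claim is dropped, as the assertions below check.
        None
    }
}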
- for n in now..=(now + max_retries + 1) { + for n in now..=(now + max_timeouts + 1) { // #n run_to_block(n, |_| None); // Time out on core 0. @@ -806,7 +810,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { Scheduler::free_cores_and_fill_claimqueue(just_updated, now); // ParaId a exists in the claim queue until max_retries is reached. - if n < max_retries + now { + if n < max_timeouts + now { assert!(claimqueue_contains_para_ids::(vec![para_a])); } else { assert!(!claimqueue_contains_para_ids::(vec![para_a])); @@ -822,7 +826,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { assert!(!availability_cores_contains_para_ids::(vec![para_a])); // #25 - now += max_retries + 2; + now += max_timeouts + 2; // Add assignment back to the mix. MockAssigner::add_test_assignment(assignment_a.clone()); @@ -833,7 +837,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { // #26 now += 1; // Run to block #n but this time have group 1 conclude the availabilty. - for n in now..=(now + max_retries + 1) { + for n in now..=(now + max_timeouts + 1) { // #n run_to_block(n, |_| None); // Time out core 0 if group 0 is assigned to it, if group 1 is assigned, conclude. @@ -874,10 +878,8 @@ fn on_demand_claims_are_pruned_after_timing_out() { fn availability_predicate_works() { let genesis_config = genesis_config(&default_config()); - let HostConfiguration { group_rotation_frequency, paras_availability_period, .. } = - default_config(); - - assert!(paras_availability_period < group_rotation_frequency); + let SchedulerParams { group_rotation_frequency, paras_availability_period, .. } = + default_config().scheduler_params; new_test_ext(genesis_config).execute_with(|| { run_to_block(1 + paras_availability_period, |_| None); @@ -1044,7 +1046,7 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { #[test] fn session_change_requires_reschedule_dropping_removed_paras() { let mut config = default_config(); - config.scheduling_lookahead = 1; + config.scheduler_params.lookahead = 1; let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); @@ -1056,7 +1058,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { new_test_ext(genesis_config).execute_with(|| { // Setting explicit core count MockAssigner::set_core_count(5); - let assignment_provider_ttl = MockAssigner::get_provider_config(CoreIndex::from(0)).ttl; + let coretime_ttl = >::config().scheduler_params.ttl; schedule_blank_para(para_a); schedule_blank_para(para_b); @@ -1120,7 +1122,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_a), // At end of block 2 - assignment_provider_ttl + 2 + coretime_ttl + 2 )] .into_iter() .collect() @@ -1169,7 +1171,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_a), // At block 3 - assignment_provider_ttl + 3 + coretime_ttl + 3 )] .into_iter() .collect() @@ -1179,7 +1181,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_b), // At block 3 - assignment_provider_ttl + 3 + coretime_ttl + 3 )] .into_iter() .collect() diff --git a/polkadot/runtime/parachains/src/session_info/migration.rs b/polkadot/runtime/parachains/src/session_info/migration.rs index 228c1e3bb2515b14f5fe59a89578b03be065955d..ea6f81834b5db6eb7336521d3d471a6c3df49985 100644 --- a/polkadot/runtime/parachains/src/session_info/migration.rs +++ b/polkadot/runtime/parachains/src/session_info/migration.rs @@ -18,5 +18,5 @@ use 
frame_support::traits::StorageVersion;

-/// The current storage version.
+/// The in-code storage version.
 pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
diff --git a/polkadot/runtime/parachains/src/session_info/tests.rs b/polkadot/runtime/parachains/src/session_info/tests.rs
index 92a50575deda8413abb18f892680d693c148d0cb..a5bfeae0745599c00de718684420fc377a9184f1 100644
--- a/polkadot/runtime/parachains/src/session_info/tests.rs
+++ b/polkadot/runtime/parachains/src/session_info/tests.rs
@@ -25,7 +25,7 @@ use crate::{
 	util::take_active_subset,
 };
 use keyring::Sr25519Keyring;
-use primitives::{BlockNumber, ValidatorId, ValidatorIndex};
+use primitives::{vstaging::SchedulerParams, BlockNumber, ValidatorId, ValidatorIndex};

 fn run_to_block(
 	to: BlockNumber,
@@ -62,9 +62,9 @@ fn run_to_block(

 fn default_config() -> HostConfiguration<BlockNumber> {
 	HostConfiguration {
-		coretime_cores: 1,
 		dispute_period: 2,
 		needed_approvals: 3,
+		scheduler_params: SchedulerParams { num_cores: 1, ..Default::default() },
 		..Default::default()
 	}
 }
diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml
index 68ded4aeaecec716a5382b5d9bccbfa7b7f27262..3dc59cc172817409bd607f5b68e52163a7b9afdb 100644
--- a/polkadot/runtime/rococo/Cargo.toml
+++ b/polkadot/runtime/rococo/Cargo.toml
@@ -14,8 +14,8 @@ workspace = true
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
 log = { workspace = true }
-serde = { version = "1.0.196", default-features = false }
-serde_derive = { version = "1.0.117", optional = true }
+serde = { workspace = true }
+serde_derive = { optional = true, workspace = true }
 static_assertions = "1.1.0"
 smallvec = "1.8.0"

@@ -111,7 +111,7 @@ keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyrin
 remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" }
 sp-trie = { path = "../../../substrate/primitives/trie" }
 separator = "0.4.1"
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false }
 tokio = { version = "1.24.2", features = ["macros"] }

diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index 03e96ab388b72c19a95bcc0c4af509fe4e56d36c..96e8c4e0979b8ee77998f2f6c0a7f0b48415e4fa 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -46,9 +46,8 @@ use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*};

 use runtime_parachains::{
 	assigner_coretime as parachains_assigner_coretime,
-	assigner_on_demand as parachains_assigner_on_demand,
-	assigner_parachains as parachains_assigner_parachains,
-	configuration as parachains_configuration, coretime, disputes as parachains_disputes,
+	assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration,
+	coretime, disputes as parachains_disputes,
 	disputes::slashing as parachains_slashing,
 	dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion,
 	inclusion::{AggregateMessageOrigin, UmpQueueId},
@@ -150,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("rococo"),
 	impl_name: create_runtime_str!("parity-rococo-v2.0"),
 	authoring_version: 0,
-	spec_version: 1_007_000,
+	spec_version: 1_008_000,
impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, @@ -1038,8 +1037,6 @@ impl parachains_assigner_on_demand::Config for Runtime { type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; } -impl parachains_assigner_parachains::Config for Runtime {} - impl parachains_assigner_coretime::Config for Runtime {} impl parachains_initializer::Config for Runtime { @@ -1401,7 +1398,6 @@ construct_runtime! { ParasSlashing: parachains_slashing = 63, MessageQueue: pallet_message_queue = 64, OnDemandAssignmentProvider: parachains_assigner_on_demand = 66, - ParachainsAssignmentProvider: parachains_assigner_parachains = 67, CoretimeAssignmentProvider: parachains_assigner_coretime = 68, // Parachain Onboarding Pallets. Start indices at 70 to leave room. @@ -1662,6 +1658,7 @@ pub mod migrations { parachains_configuration::migration::v11::MigrateToV11, // This needs to come after the `parachains_configuration` above as we are reading the configuration. coretime::migration::MigrateToCoretime, + parachains_configuration::migration::v12::MigrateToV12, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, @@ -1772,7 +1769,7 @@ sp_api::impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -2276,12 +2273,30 @@ sp_api::impl_runtime_apis! { TokenLocation::get(), ExistentialDeposit::get() ).into()); - pub ToParachain: ParaId = rococo_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + pub AssetHubParaId: ParaId = rococo_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + pub const RandomParaId: ParaId = ParaId::new(43211234); } impl frame_system_benchmarking::Config for Runtime {} impl frame_benchmarking::baseline::Config for Runtime {} impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + runtime_common::xcm_sender::ToParachainDeliveryHelper< + XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + AssetHubParaId, + (), + >, + runtime_common::xcm_sender::ToParachainDeliveryHelper< + XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + RandomParaId, + (), + > + ); + fn reachable_dest() -> Option { Some(crate::xcm_config::AssetHub::get()) } @@ -2290,7 +2305,7 @@ sp_api::impl_runtime_apis! { // Relay/native token can be teleported to/from AH. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, crate::xcm_config::AssetHub::get(), @@ -2301,10 +2316,10 @@ sp_api::impl_runtime_apis! { // Relay can reserve transfer native token to some random parachain. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, - Parachain(43211234).into(), + Parachain(RandomParaId::get().into()).into(), )) } @@ -2321,6 +2336,13 @@ sp_api::impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::here()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; @@ -2329,7 +2351,7 @@ sp_api::impl_runtime_apis! 
{ XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, - ToParachain, + AssetHubParaId, (), >; fn valid_destination() -> Result { diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 177407ef7088b5c9bdcc460f36cb7d6485743a71..5544ca44658cc09fa51cd75679ba90132eb16444 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,10 +48,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - fn transfer_assets() -> Weight { - // TODO: run benchmarks - Weight::zero() - } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) @@ -62,36 +58,80 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 27_328_000 picoseconds. - Weight::from_parts(27_976_000, 0) - .saturating_add(Weight::from_parts(0, 3607)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 25_043_000 picoseconds. + Weight::from_parts(25_682_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 16_280_000 picoseconds. - Weight::from_parts(16_904_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 107_570_000 picoseconds. 
+ Weight::from_parts(109_878_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 15_869_000 picoseconds. - Weight::from_parts(16_264_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `232` + // Estimated: `3697` + // Minimum execution time: 106_341_000 picoseconds. + Weight::from_parts(109_135_000, 0) + .saturating_add(Weight::from_parts(0, 3697)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 108_372_000 picoseconds. + Weight::from_parts(112_890_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_923_000 picoseconds. - Weight::from_parts(7_432_000, 0) + // Minimum execution time: 6_957_000 picoseconds. + Weight::from_parts(7_417_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -100,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_333_000 picoseconds. - Weight::from_parts(7_566_000, 0) + // Minimum execution time: 7_053_000 picoseconds. + Weight::from_parts(7_462_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -109,8 +149,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_219_000 picoseconds. 
- Weight::from_parts(2_375_000, 0) + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_037_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -129,11 +169,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 30_650_000 picoseconds. - Weight::from_parts(31_683_000, 0) - .saturating_add(Weight::from_parts(0, 3607)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 30_417_000 picoseconds. + Weight::from_parts(31_191_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -151,11 +191,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `322` - // Estimated: `3787` - // Minimum execution time: 37_666_000 picoseconds. - Weight::from_parts(38_920_000, 0) - .saturating_add(Weight::from_parts(0, 3787)) + // Measured: `360` + // Estimated: `3825` + // Minimum execution time: 36_666_000 picoseconds. + Weight::from_parts(37_779_000, 0) + .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -165,45 +205,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_244_000 picoseconds. - Weight::from_parts(2_425_000, 0) + // Minimum execution time: 1_869_000 picoseconds. + Weight::from_parts(2_003_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `26` - // Estimated: `10916` - // Minimum execution time: 14_710_000 picoseconds. - Weight::from_parts(15_156_000, 0) - .saturating_add(Weight::from_parts(0, 10916)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `22` + // Estimated: `13387` + // Minimum execution time: 16_188_000 picoseconds. + Weight::from_parts(16_435_000, 0) + .saturating_add(Weight::from_parts(0, 13387)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `10920` - // Minimum execution time: 14_630_000 picoseconds. - Weight::from_parts(15_290_000, 0) - .saturating_add(Weight::from_parts(0, 10920)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `26` + // Estimated: `13391` + // Minimum execution time: 16_431_000 picoseconds. 
+ Weight::from_parts(16_935_000, 0) + .saturating_add(Weight::from_parts(0, 13391)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `13405` - // Minimum execution time: 16_686_000 picoseconds. - Weight::from_parts(17_332_000, 0) - .saturating_add(Weight::from_parts(0, 13405)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15880` + // Minimum execution time: 18_460_000 picoseconds. + Weight::from_parts(18_885_000, 0) + .saturating_add(Weight::from_parts(0, 15880)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -217,38 +257,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `6118` - // Minimum execution time: 30_180_000 picoseconds. - Weight::from_parts(31_351_000, 0) - .saturating_add(Weight::from_parts(0, 6118)) + // Measured: `216` + // Estimated: `6156` + // Minimum execution time: 29_623_000 picoseconds. + Weight::from_parts(30_661_000, 0) + .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `69` - // Estimated: `8484` - // Minimum execution time: 9_624_000 picoseconds. - Weight::from_parts(10_029_000, 0) - .saturating_add(Weight::from_parts(0, 8484)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `10959` + // Minimum execution time: 12_043_000 picoseconds. + Weight::from_parts(12_360_000, 0) + .saturating_add(Weight::from_parts(0, 10959)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `37` - // Estimated: `10927` - // Minimum execution time: 15_139_000 picoseconds. - Weight::from_parts(15_575_000, 0) - .saturating_add(Weight::from_parts(0, 10927)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `33` + // Estimated: `13398` + // Minimum execution time: 16_511_000 picoseconds. 
+ Weight::from_parts(17_011_000, 0) + .saturating_add(Weight::from_parts(0, 13398)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -260,12 +300,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `182` - // Estimated: `11072` - // Minimum execution time: 37_871_000 picoseconds. - Weight::from_parts(38_940_000, 0) - .saturating_add(Weight::from_parts(0, 11072)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `216` + // Estimated: `13581` + // Minimum execution time: 39_041_000 picoseconds. + Weight::from_parts(39_883_000, 0) + .saturating_add(Weight::from_parts(0, 13581)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) @@ -276,8 +316,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_732_000 picoseconds. - Weight::from_parts(2_892_000, 0) + // Minimum execution time: 2_030_000 picoseconds. + Weight::from_parts(2_150_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -288,10 +328,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 23_813_000 picoseconds. - Weight::from_parts(24_201_000, 0) + // Minimum execution time: 22_615_000 picoseconds. + Weight::from_parts(23_008_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 34_438_000 picoseconds. + Weight::from_parts(35_514_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index 34541b83597e6284a401a171e703b200366114a0..ca0575cb1b64689b7b016d63a2cfc5080dc874a5 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -58,8 +58,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_793_000 picoseconds. - Weight::from_parts(8_192_000, 0) + // Minimum execution time: 7_789_000 picoseconds. + Weight::from_parts(8_269_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +74,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_819_000 picoseconds. - Weight::from_parts(8_004_000, 0) + // Minimum execution time: 7_851_000 picoseconds. + Weight::from_parts(8_152_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +90,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_760_000 picoseconds. - Weight::from_parts(8_174_000, 0) + // Minimum execution time: 7_960_000 picoseconds. + Weight::from_parts(8_276_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +116,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_814_000 picoseconds. - Weight::from_parts(8_098_000, 0) + // Minimum execution time: 7_912_000 picoseconds. + Weight::from_parts(8_164_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +132,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_028_000 picoseconds. - Weight::from_parts(10_386_000, 0) + // Minimum execution time: 9_782_000 picoseconds. + Weight::from_parts(10_373_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +148,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_867_000 picoseconds. - Weight::from_parts(8_191_000, 0) + // Minimum execution time: 7_870_000 picoseconds. + Weight::from_parts(8_274_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +164,24 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_158_000 picoseconds. - Weight::from_parts(10_430_000, 0) + // Minimum execution time: 9_960_000 picoseconds. 
+ Weight::from_parts(10_514_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_scheduler_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_913_000 picoseconds. + Weight::from_parts(8_338_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 1f041fdfeaa5421af883b761da7afae4cca2ff80..9753a4093045b52c8c709cfcea4a19a15dc510d6 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -16,8 +16,8 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false } -serde_derive = { version = "1.0.117", optional = true } +serde = { workspace = true } +serde_derive = { optional = true, workspace = true } smallvec = "1.8.0" authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = false } @@ -74,7 +74,7 @@ hex-literal = "0.4.1" tiny-keccak = { version = "2.0.2", features = ["keccak"] } keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } sp-trie = { path = "../../../substrate/primitives/trie" } -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index b74def5de8aadd718df48b27aff1a7ca324389c3..6c899c5270156dbbdcad0108f7e1ffb69069c288 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -767,7 +767,7 @@ sp_api::impl_runtime_apis! 
{ Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index a81d35f4d788f1628cf20cf5cbd0e366828101ff..a48bca17e9ff988da3d0ca4601003f3f6d96a71d 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -16,14 +16,16 @@ use frame_support::{ parameter_types, - traits::{Everything, Nothing}, + traits::{Everything, Get, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; +use polkadot_runtime_parachains::FeeTracker; +use runtime_common::xcm_sender::{ChildParachainRouter, PriceForMessageDelivery}; use xcm::latest::prelude::*; use xcm_builder::{ AllowUnpaidExecutionFrom, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, - SignedAccountId32AsNative, SignedToAccountId32, + SignedAccountId32AsNative, SignedToAccountId32, WithUniqueTopic, }; use xcm_executor::{ traits::{TransactAsset, WeightTrader}, @@ -36,6 +38,8 @@ parameter_types! { pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 16; pub const UniversalLocation: xcm::latest::InteriorLocation = xcm::latest::Junctions::Here; + pub TokenLocation: Location = Here.into_location(); + pub FeeAssetId: AssetId = AssetId(TokenLocation::get()); } /// Type to convert an `Origin` type value into a `Location` value which represents an interior @@ -45,17 +49,43 @@ pub type LocalOriginToLocation = ( SignedToAccountId32, ); -pub struct DoNothingRouter; -impl SendXcm for DoNothingRouter { - type Ticket = (); - fn validate(_dest: &mut Option, _msg: &mut Option>) -> SendResult<()> { - Ok(((), Assets::new())) - } - fn deliver(_: ()) -> Result { - Ok([0; 32]) +/// Implementation of [`PriceForMessageDelivery`], returning a different price +/// based on whether a message contains a reanchored asset or not. +/// This implementation ensures that messages with non-reanchored assets return higher +/// prices than messages with reanchored assets. +/// Useful for `deposit_reserve_asset_works_for_any_xcm_sender` integration test. +pub struct TestDeliveryPrice(sp_std::marker::PhantomData<(A, F)>); +impl, F: FeeTracker> PriceForMessageDelivery for TestDeliveryPrice { + type Id = F::Id; + + fn price_for_delivery(_: Self::Id, msg: &Xcm<()>) -> Assets { + let base_fee: super::Balance = 1_000_000; + + let parents = msg.iter().find_map(|xcm| match xcm { + ReserveAssetDeposited(assets) => { + let AssetId(location) = &assets.inner().first().unwrap().id; + Some(location.parents) + }, + _ => None, + }); + + // If no asset is found, price defaults to `base_fee`. + let amount = base_fee + .saturating_add(base_fee.saturating_mul(parents.unwrap_or(0) as super::Balance)); + + (A::get(), amount).into() } } +pub type PriceForChildParachainDelivery = TestDeliveryPrice; + +/// The XCM router. When we want to send an XCM message, we use this type. It amalgamates all of our +/// individual routers. +pub type XcmRouter = WithUniqueTopic< + // Only one router so far - use DMP to communicate with child parachains. 
+ ChildParachainRouter, +>; + pub type Barrier = AllowUnpaidExecutionFrom; pub struct DummyAssetTransactor; @@ -99,7 +129,7 @@ type OriginConverter = ( pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = super::RuntimeCall; - type XcmSender = DoNothingRouter; + type XcmSender = XcmRouter; type AssetTransactor = DummyAssetTransactor; type OriginConverter = OriginConverter; type IsReserve = (); @@ -133,7 +163,7 @@ impl pallet_xcm::Config for crate::Runtime { type UniversalLocation = UniversalLocation; type SendXcmOrigin = EnsureXcmOrigin; type Weigher = FixedWeightBounds; - type XcmRouter = DoNothingRouter; + type XcmRouter = XcmRouter; type XcmExecuteFilter = Everything; type XcmExecutor = xcm_executor::XcmExecutor; type XcmTeleportFilter = Everything; diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 31b3f8351d885c0576444c4fdd39e5dc81888c9e..4180828bcfb14d497d9da06bc74da91fa53f8d5f 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -16,8 +16,8 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -serde = { version = "1.0.196", default-features = false } -serde_derive = { version = "1.0.117", optional = true } +serde = { workspace = true } +serde_derive = { optional = true, workspace = true } smallvec = "1.8.0" authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = false } @@ -119,7 +119,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", hex-literal = "0.4.1" tiny-keccak = { version = "2.0.2", features = ["keccak"] } keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } tokio = { version = "1.24.2", features = ["macros"] } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index 848cccd559dc3e50cbd2a50c4df6a643910fe35c..c98f4b114fd88241dea1e2e5ffcf694848448007 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -107,6 +107,8 @@ pub mod system_parachain { pub const COLLECTIVES_ID: u32 = 1001; /// BridgeHub parachain ID. pub const BRIDGE_HUB_ID: u32 = 1002; + /// Encointer parachain ID. + pub const ENCOINTER_ID: u32 = 1003; /// People Chain parachain ID. pub const PEOPLE_ID: u32 = 1004; /// Brokerage parachain ID. @@ -128,6 +130,8 @@ pub mod xcm { const ROOT_INDEX: u32 = 0; // The bodies corresponding to the Polkadot OpenGov Origins. 
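// Worked example of the pricing rule implemented by `TestDeliveryPrice` in
// the test-runtime xcm_config hunk above. The free function and test are
// illustrative additions; the arithmetic mirrors the impl: the fee grows with
// the `parents` count of the first `ReserveAssetDeposited` asset, so a
// non-reanchored asset (parents > 0) is priced above a reanchored one
// (parents == 0) or a message carrying no such asset at all.
fn delivery_amount(base_fee: u128, parents: Option<u8>) -> u128 {
    base_fee.saturating_add(base_fee.saturating_mul(parents.unwrap_or(0) as u128))
}

#[test]
fn delivery_price_scales_with_parents() {
    assert_eq!(delivery_amount(1_000_000, None), 1_000_000); // no asset: base fee
    assert_eq!(delivery_amount(1_000_000, Some(0)), 1_000_000); // reanchored
    assert_eq!(delivery_amount(1_000_000, Some(1)), 2_000_000); // not reanchored
}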
pub const FELLOWSHIP_ADMIN_INDEX: u32 = 1; + #[deprecated = "Will be removed after August 2024; Use `xcm::latest::BodyId::Treasury` \ + instead"] pub const TREASURER_INDEX: u32 = 2; } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index bbb010f60bffe8a8c99050ceb79d11f13143e0c6..b55d68ee0f63e366b5dd2693d25bf806239c6bfa 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -68,9 +68,8 @@ use runtime_common::{ }; use runtime_parachains::{ assigner_coretime as parachains_assigner_coretime, - assigner_on_demand as parachains_assigner_on_demand, - assigner_parachains as parachains_assigner_parachains, - configuration as parachains_configuration, coretime, disputes as parachains_disputes, + assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration, + coretime, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, inclusion::{AggregateMessageOrigin, UmpQueueId}, @@ -151,7 +150,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, @@ -1243,8 +1242,6 @@ impl parachains_assigner_on_demand::Config for Runtime { type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; } -impl parachains_assigner_parachains::Config for Runtime {} - impl parachains_assigner_coretime::Config for Runtime {} impl parachains_initializer::Config for Runtime { @@ -1505,7 +1502,6 @@ construct_runtime! { ParaSessionInfo: parachains_session_info = 52, ParasDisputes: parachains_disputes = 53, ParasSlashing: parachains_slashing = 54, - ParachainsAssignmentProvider: parachains_assigner_parachains = 55, OnDemandAssignmentProvider: parachains_assigner_on_demand = 56, CoretimeAssignmentProvider: parachains_assigner_coretime = 57, @@ -1703,6 +1699,7 @@ pub mod migrations { // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, + parachains_configuration::migration::v12::MigrateToV12, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, // Migrate from legacy lease to coretime. Needs to run after configuration v11 @@ -1797,7 +1794,7 @@ sp_api::impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -2348,7 +2345,36 @@ sp_api::impl_runtime_apis! { impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} impl pallet_election_provider_support_benchmarking::Config for Runtime {} + + use xcm_config::{AssetHub, TokenLocation}; + + parameter_types! 
{ + pub ExistentialDepositAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + pub AssetHubParaId: ParaId = westend_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + pub const RandomParaId: ParaId = ParaId::new(43211234); + } + impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + AssetHubParaId, + (), + >, + runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + RandomParaId, + (), + > + ); + fn reachable_dest() -> Option { Some(crate::xcm_config::AssetHub::get()) } @@ -2356,7 +2382,7 @@ sp_api::impl_runtime_apis! { fn teleportable_asset_and_dest() -> Option<(Asset, Location)> { // Relay/native token can be teleported to/from AH. Some(( - Asset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: AssetId(Here.into()) }, + Asset { fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, crate::xcm_config::AssetHub::get(), )) } @@ -2365,10 +2391,10 @@ sp_api::impl_runtime_apis! { // Relay can reserve transfer native token to some random parachain. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, - crate::Junction::Parachain(43211234).into(), + crate::Junction::Parachain(RandomParaId::get().into()).into(), )) } @@ -2386,6 +2412,13 @@ sp_api::impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::here()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} @@ -2395,15 +2428,6 @@ sp_api::impl_runtime_apis! { AssetId, Fungibility::*, InteriorLocation, Junction, Junctions::*, Asset, Assets, Location, NetworkId, Response, }; - use xcm_config::{AssetHub, TokenLocation}; - - parameter_types! { - pub ExistentialDepositAsset: Option = Some(( - TokenLocation::get(), - ExistentialDeposit::get() - ).into()); - pub ToParachain: ParaId = westend_runtime_constants::system_parachain::ASSET_HUB_ID.into(); - } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; @@ -2412,7 +2436,7 @@ sp_api::impl_runtime_apis! { xcm_config::XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, - ToParachain, + AssetHubParaId, (), >; fn valid_destination() -> Result { diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 493acd0f9e7bdfbfd1e716b7e82a474643f1050b..10725cecf24995e34b3254baa23535cb91dd9981 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -16,26 +16,24 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=pallet_xcm // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=westend-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/westend/src/weights/ @@ -50,10 +48,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - fn transfer_assets() -> Weight { - // TODO: run benchmarks - Weight::zero() - } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) @@ -64,29 +58,73 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 28_098_000 picoseconds. - Weight::from_parts(28_887_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_725_000 picoseconds. + Weight::from_parts(26_174_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 17_609_000 picoseconds. - Weight::from_parts(18_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `250` + // Estimated: `6196` + // Minimum execution time: 113_140_000 picoseconds. 
+ Weight::from_parts(116_204_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 17_007_000 picoseconds. - Weight::from_parts(17_471_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `302` + // Estimated: `6196` + // Minimum execution time: 108_571_000 picoseconds. + Weight::from_parts(110_650_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `250` + // Estimated: `6196` + // Minimum execution time: 111_836_000 picoseconds. + Weight::from_parts(114_435_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -104,8 +142,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_444_000 picoseconds. - Weight::from_parts(7_671_000, 0) + // Minimum execution time: 7_160_000 picoseconds. + Weight::from_parts(7_477_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -113,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_126_000 picoseconds. - Weight::from_parts(2_253_000, 0) + // Minimum execution time: 1_934_000 picoseconds. 
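+ // No storage is touched here, so only reference time is charged.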
+ Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -133,11 +171,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 31_318_000 picoseconds. - Weight::from_parts(32_413_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 31_123_000 picoseconds. + Weight::from_parts(31_798_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -155,11 +193,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `289` - // Estimated: `3754` - // Minimum execution time: 35_282_000 picoseconds. - Weight::from_parts(35_969_000, 0) - .saturating_add(Weight::from_parts(0, 3754)) + // Measured: `327` + // Estimated: `3792` + // Minimum execution time: 35_175_000 picoseconds. + Weight::from_parts(36_098_000, 0) + .saturating_add(Weight::from_parts(0, 3792)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -169,45 +207,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_247_000 picoseconds. - Weight::from_parts(2_381_000, 0) + // Minimum execution time: 1_974_000 picoseconds. + Weight::from_parts(2_096_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `26` - // Estimated: `10916` - // Minimum execution time: 14_512_000 picoseconds. - Weight::from_parts(15_042_000, 0) - .saturating_add(Weight::from_parts(0, 10916)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `22` + // Estimated: `13387` + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_170_000, 0) + .saturating_add(Weight::from_parts(0, 13387)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `10920` - // Minimum execution time: 14_659_000 picoseconds. - Weight::from_parts(15_164_000, 0) - .saturating_add(Weight::from_parts(0, 10920)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `26` + // Estimated: `13391` + // Minimum execution time: 16_937_000 picoseconds. 
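+ // One more versioned key is visited than in the previous run, hence `r:5`
+ // and the larger proof-size estimate.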
+ Weight::from_parts(17_447_000, 0) + .saturating_add(Weight::from_parts(0, 13391)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `13405` - // Minimum execution time: 16_261_000 picoseconds. - Weight::from_parts(16_986_000, 0) - .saturating_add(Weight::from_parts(0, 13405)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15880` + // Minimum execution time: 19_157_000 picoseconds. + Weight::from_parts(19_659_000, 0) + .saturating_add(Weight::from_parts(0, 15880)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -221,38 +259,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 30_539_000 picoseconds. - Weight::from_parts(31_117_000, 0) - .saturating_add(Weight::from_parts(0, 6085)) + // Measured: `183` + // Estimated: `6123` + // Minimum execution time: 30_699_000 picoseconds. + Weight::from_parts(31_537_000, 0) + .saturating_add(Weight::from_parts(0, 6123)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `69` - // Estimated: `8484` - // Minimum execution time: 9_463_000 picoseconds. - Weight::from_parts(9_728_000, 0) - .saturating_add(Weight::from_parts(0, 8484)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `10959` + // Minimum execution time: 12_303_000 picoseconds. + Weight::from_parts(12_670_000, 0) + .saturating_add(Weight::from_parts(0, 10959)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `37` - // Estimated: `10927` - // Minimum execution time: 15_169_000 picoseconds. - Weight::from_parts(15_694_000, 0) - .saturating_add(Weight::from_parts(0, 10927)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `33` + // Estimated: `13398` + // Minimum execution time: 17_129_000 picoseconds. 
+ Weight::from_parts(17_668_000, 0) + .saturating_add(Weight::from_parts(0, 13398)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -264,12 +302,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `149` - // Estimated: `11039` - // Minimum execution time: 37_549_000 picoseconds. - Weight::from_parts(38_203_000, 0) - .saturating_add(Weight::from_parts(0, 11039)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `183` + // Estimated: `13548` + // Minimum execution time: 39_960_000 picoseconds. + Weight::from_parts(41_068_000, 0) + .saturating_add(Weight::from_parts(0, 13548)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) @@ -280,8 +318,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_947_000 picoseconds. - Weight::from_parts(3_117_000, 0) + // Minimum execution time: 2_333_000 picoseconds. + Weight::from_parts(2_504_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -292,10 +330,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 24_595_000 picoseconds. - Weight::from_parts(24_907_000, 0) + // Minimum execution time: 22_932_000 picoseconds. + Weight::from_parts(23_307_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 34_558_000 picoseconds. + Weight::from_parts(35_299_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs index 3a4813b667c68ea6400c9ad58cea8fd1bfece5d0..8fa3207c6446082a97877f488913d9c063114fca 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -58,8 +58,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 8_065_000 picoseconds. - Weight::from_parts(8_389_000, 0) + // Minimum execution time: 7_775_000 picoseconds. + Weight::from_parts(8_036_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +74,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 8_038_000 picoseconds. - Weight::from_parts(8_463_000, 0) + // Minimum execution time: 7_708_000 picoseconds. + Weight::from_parts(7_971_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +90,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_843_000 picoseconds. - Weight::from_parts(8_216_000, 0) + // Minimum execution time: 7_746_000 picoseconds. + Weight::from_parts(8_028_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +116,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_969_000 picoseconds. - Weight::from_parts(8_362_000, 0) + // Minimum execution time: 7_729_000 picoseconds. + Weight::from_parts(7_954_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +132,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_084_000 picoseconds. - Weight::from_parts(10_451_000, 0) + // Minimum execution time: 9_871_000 picoseconds. + Weight::from_parts(10_075_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +148,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_948_000 picoseconds. - Weight::from_parts(8_268_000, 0) + // Minimum execution time: 7_869_000 picoseconds. + Weight::from_parts(8_000_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +164,24 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_257_000 picoseconds. - Weight::from_parts(10_584_000, 0) + // Minimum execution time: 9_797_000 picoseconds. 
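+ // Like every other `set_*` call in this file: 3 reads (pending configs,
+ // bypass flag, current session index) and 1 write.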
+ Weight::from_parts(10_373_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_scheduler_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_718_000 picoseconds. + Weight::from_parts(7_984_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 06124d896fbb31ecca032149fb5604008f51f4b7..c57af987ca78050e129ab37736bfb8df7ea46834 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -34,9 +34,7 @@ use runtime_common::{ }; use sp_core::ConstU32; use westend_runtime_constants::{ - currency::CENTS, - system_parachain::*, - xcm::body::{FELLOWSHIP_ADMIN_INDEX, TREASURER_INDEX}, + currency::CENTS, system_parachain::*, xcm::body::FELLOWSHIP_ADMIN_INDEX, }; use xcm::latest::prelude::*; use xcm_builder::{ @@ -114,12 +112,14 @@ parameter_types! { pub AssetHub: Location = Parachain(ASSET_HUB_ID).into_location(); pub Collectives: Location = Parachain(COLLECTIVES_ID).into_location(); pub BridgeHub: Location = Parachain(BRIDGE_HUB_ID).into_location(); + pub Encointer: Location = Parachain(ENCOINTER_ID).into_location(); pub People: Location = Parachain(PEOPLE_ID).into_location(); pub Broker: Location = Parachain(BROKER_ID).into_location(); pub Wnd: AssetFilter = Wild(AllOf { fun: WildFungible, id: AssetId(TokenLocation::get()) }); pub WndForAssetHub: (AssetFilter, Location) = (Wnd::get(), AssetHub::get()); pub WndForCollectives: (AssetFilter, Location) = (Wnd::get(), Collectives::get()); pub WndForBridgeHub: (AssetFilter, Location) = (Wnd::get(), BridgeHub::get()); + pub WndForEncointer: (AssetFilter, Location) = (Wnd::get(), Encointer::get()); pub WndForPeople: (AssetFilter, Location) = (Wnd::get(), People::get()); pub WndForBroker: (AssetFilter, Location) = (Wnd::get(), Broker::get()); pub MaxInstructions: u32 = 100; @@ -130,6 +130,7 @@ pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, + xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, ); @@ -228,7 +229,7 @@ parameter_types! { // FellowshipAdmin pluralistic body. pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); // `Treasurer` pluralistic body. - pub const TreasurerBodyId: BodyId = BodyId::Index(TREASURER_INDEX); + pub const TreasurerBodyId: BodyId = BodyId::Treasury; } /// Type to convert the `GeneralAdmin` origin to a Plurality `Location` value. @@ -262,7 +263,7 @@ pub type LocalPalletOriginToLocation = ( StakingAdminToPlurality, // FellowshipAdmin origin to be used in XCM as a corresponding Plurality `Location` value. 
FellowshipAdminToPlurality, - // `Treasurer` origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + // `Treasurer` origin to be used in XCM as a corresponding Plurality `Location` value. TreasurerToPlurality, ); diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 6403b822ed9b1ad6879698b3909f0f48daf1e765..37b8a99d640a2d23e0d7a532adc0c43b04bba1f2 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -13,3 +13,4 @@ workspace = true parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-core = { path = "../../substrate/primitives/core" } primitives = { package = "polkadot-primitives", path = "../primitives" } +gum = { package = "tracing-gum", path = "../node/gum" } diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 22bffde5acc11b8fdbea565d054949caf40d788f..2ee6f6a4f781842866728e2105d6c23e2fa1cd70 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -36,6 +36,7 @@ use primitives::{ }; use parity_scale_codec::{Decode, Encode}; +const LOG_TARGET: &str = "parachain::statement-table"; /// Context for the statement table. pub trait Context { @@ -53,9 +54,6 @@ pub trait Context { /// get the digest of a candidate. fn candidate_digest(candidate: &Self::Candidate) -> Self::Digest; - /// get the group of a candidate. - fn candidate_group(candidate: &Self::Candidate) -> Self::GroupId; - /// Whether an authority is a member of a group. /// Members are meant to submit candidates and vote on validity. fn is_member_of(&self, authority: &Self::AuthorityId, group: &Self::GroupId) -> bool; @@ -342,13 +340,13 @@ impl<Ctx: Context> Table<Ctx> { pub fn import_statement( &mut self, context: &Ctx, + group_id: Ctx::GroupId, statement: SignedStatement<Ctx::Candidate, Ctx::Digest, Ctx::AuthorityId, Ctx::Signature>, ) -> Option<Summary<Ctx::Digest, Ctx::GroupId>> { let SignedStatement { statement, signature, sender: signer } = statement; - let res = match statement { Statement::Seconded(candidate) => - self.import_candidate(context, signer.clone(), candidate, signature), + self.import_candidate(context, signer.clone(), candidate, signature, group_id), Statement::Valid(digest) => self.validity_vote(context, signer.clone(), digest, ValidityVote::Valid(signature)), }; @@ -387,9 +385,10 @@ impl<Ctx: Context> Table<Ctx> { authority: Ctx::AuthorityId, candidate: Ctx::Candidate, signature: Ctx::Signature, + group: Ctx::GroupId, ) -> ImportResult<Ctx> { - let group = Ctx::candidate_group(&candidate); if !context.is_member_of(&authority, &group) { + gum::debug!(target: LOG_TARGET, authority = ?authority, group = ?group, "New `Misbehavior::UnauthorizedStatement`, candidate backed by validator that doesn't belong to the expected group" ); return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement { statement: SignedStatement { signature, @@ -634,10 +633,6 @@ mod tests { Digest(candidate.1) } - fn candidate_group(candidate: &Candidate) -> GroupId { - GroupId(candidate.0) - } - fn is_member_of(&self, authority: &AuthorityId, group: &GroupId) -> bool { self.authorities.get(authority).map(|v| v == group).unwrap_or(false) } @@ -675,10 +670,10 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement_a); + table.import_statement(&context, GroupId(2), statement_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - table.import_statement(&context, statement_b); + table.import_statement(&context, GroupId(2), statement_b); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], 
Misbehavior::MultipleCandidates(MultipleCandidates { @@ -711,10 +706,10 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement_a); + table.import_statement(&context, GroupId(2), statement_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - table.import_statement(&context, statement_b); + table.import_statement(&context, GroupId(2), statement_b); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); } @@ -735,7 +730,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], @@ -769,7 +764,7 @@ mod tests { }; let candidate_a_digest = Digest(100); - table.import_statement(&context, candidate_a); + table.import_statement(&context, GroupId(2), candidate_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); @@ -779,7 +774,7 @@ mod tests { signature: Signature(2), sender: AuthorityId(2), }; - table.import_statement(&context, bad_validity_vote); + table.import_statement(&context, GroupId(3), bad_validity_vote); assert_eq!( table.detected_misbehavior[&AuthorityId(2)][0], @@ -811,7 +806,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let invalid_statement = SignedStatement { @@ -820,7 +815,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, invalid_statement); + table.import_statement(&context, GroupId(2), invalid_statement); assert!(table.detected_misbehavior.contains_key(&AuthorityId(1))); } @@ -842,7 +837,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let extra_vote = SignedStatement { @@ -851,7 +846,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, extra_vote); + table.import_statement(&context, GroupId(2), extra_vote); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], Misbehavior::ValidityDoubleVote(ValidityDoubleVote::IssuedAndValidity( @@ -910,7 +905,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); assert!(table.attested_candidate(&candidate_digest, &context, 2).is_none()); @@ -921,7 +916,7 @@ mod tests { sender: AuthorityId(2), }; - table.import_statement(&context, vote); + table.import_statement(&context, GroupId(2), vote); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); assert!(table.attested_candidate(&candidate_digest, &context, 2).is_some()); } @@ -944,7 +939,7 @@ mod tests { }; let summary = table - .import_statement(&context, statement) + .import_statement(&context, GroupId(2), statement) .expect("candidate import to give summary"); assert_eq!(summary.candidate, Digest(100)); @@ -971,7 +966,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let vote = SignedStatement { @@ -980,8 +975,9 @@ mod tests { 
sender: AuthorityId(2), }; - let summary = - table.import_statement(&context, vote).expect("candidate vote to give summary"); + let summary = table + .import_statement(&context, GroupId(2), vote) + .expect("candidate vote to give summary"); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); diff --git a/polkadot/statement-table/src/lib.rs b/polkadot/statement-table/src/lib.rs index d4629330ac01512e2dc2b0f441d2302f0fd38624..3740d15cc4f326962b962557e61ca14e9163de7d 100644 --- a/polkadot/statement-table/src/lib.rs +++ b/polkadot/statement-table/src/lib.rs @@ -35,8 +35,8 @@ pub use generic::{Config, Context, Table}; pub mod v2 { use crate::generic; use primitives::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, Id, - ValidatorIndex, ValidatorSignature, + CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, + CoreIndex, ValidatorIndex, ValidatorSignature, }; /// Statements about candidates on the network. @@ -59,7 +59,7 @@ pub mod v2 { >; /// A summary of import of a statement. - pub type Summary = generic::Summary; + pub type Summary = generic::Summary; impl<'a> From<&'a Statement> for PrimitiveStatement { fn from(s: &'a Statement) -> PrimitiveStatement { diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 888757150b45f68acc7c5fe6ad452e2332024496..f9ccfb9833a6b276f001491ff29daa183f0f8e22 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -18,7 +18,7 @@ log = { workspace = true } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive", "rc"] } +serde = { features = ["alloc", "derive", "rc"], workspace = true } schemars = { version = "0.8.13", default-features = true, optional = true } xcm-procedural = { path = "procedural" } environmental = { version = "1.1.4", default-features = false } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index 6ce8d3e99e8e54114048ff35d2c2a7c38b01b9c4..63ed0ac0ca736450077a4677d29d65a81e9eaf3b 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -22,10 +22,8 @@ use codec::Encode; use frame_benchmarking::{account, BenchmarkError}; use sp_std::prelude::*; use xcm::latest::prelude::*; -use xcm_executor::{ - traits::{ConvertLocation, FeeReason}, - Config as XcmConfig, FeesMode, -}; +use xcm_builder::EnsureDelivery; +use xcm_executor::{traits::ConvertLocation, Config as XcmConfig}; pub mod fungible; pub mod generic; @@ -114,29 +112,3 @@ pub fn account_and_location(index: u32) -> (T::AccountId, Location) { (account, location) } - -/// Trait for a type which ensures all requirements for successful delivery with XCM transport -/// layers. -pub trait EnsureDelivery { - /// Prepare all requirements for successful `XcmSender: SendXcm` passing (accounts, balances, - /// channels ...). 
Returns: - /// - possible `FeesMode` which is expected to be set to executor - /// - possible `Assets` which are expected to be subsume to the Holding Register - fn ensure_successful_delivery( - origin_ref: &Location, - dest: &Location, - fee_reason: FeeReason, - ) -> (Option, Option); -} - -/// `()` implementation does nothing which means no special requirements for environment. -impl EnsureDelivery for () { - fn ensure_successful_delivery( - _origin_ref: &Location, - _dest: &Location, - _fee_reason: FeeReason, - ) -> (Option, Option) { - // doing nothing - (None, None) - } -} diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index ac39e5f74292c4e0267b074734b6be49ba880996..4840b6127f55425de91eea85099649d4dfda0881 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -13,7 +13,7 @@ workspace = true bounded-collections = { version = "0.2.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index c7d8fb24e9df320f8439616591c467f3f6d7aa1e..ed42f93692b406ea07b2edbda0fbdb315c2b4071 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -17,21 +17,26 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; -use frame_support::{traits::Currency, weights::Weight}; +use frame_support::{ + traits::fungible::{Inspect, Mutate}, + weights::Weight, +}; use frame_system::RawOrigin; use sp_std::prelude::*; use xcm::{latest::prelude::*, v2}; +use xcm_builder::EnsureDelivery; +use xcm_executor::traits::FeeReason; type RuntimeOrigin = ::RuntimeOrigin; -// existential deposit multiplier -const ED_MULTIPLIER: u32 = 100; - /// Pallet we're benchmarking here. pub struct Pallet(crate::Pallet); /// Trait that must be implemented by runtime to be able to benchmark pallet properly. pub trait Config: crate::Config { + /// Helper that ensures successful delivery for extrinsics/benchmarks which need `SendXcm`. + type DeliveryHelper: EnsureDelivery; + /// A `Location` that can be reached via `XcmRouter`. Used only in benchmarks. /// /// If `None`, the benchmarks that depend on a reachable destination will be skipped. @@ -74,6 +79,13 @@ pub trait Config: crate::Config { fn set_up_complex_asset_transfer() -> Option<(Assets, u32, Location, Box)> { None } + + /// Gets an asset that can be handled by the AssetTransactor. + /// + /// Used only in benchmarks. + /// + /// Used, for example, in the benchmark for `claim_assets`. + fn get_asset() -> Asset; } benchmarks! { @@ -107,23 +119,29 @@ benchmarks! 
{ }.into(); let assets: Assets = asset.into(); - let existential_deposit = T::ExistentialDeposit::get(); - let caller = whitelisted_caller(); - - // Give some multiple of the existential deposit - let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - assert!(balance >= transferred_amount); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - // verify initial balance - assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); - + let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location, assets.clone().into_inner())) { + if !T::XcmTeleportFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &origin_location, + &destination, + FeeReason::ChargeFees, + ); + + // Actual balance (e.g. `ensure_successful_delivery` could drip delivery fees, ...) + let balance = as Inspect<_>>::balance(&caller); + // Add transferred_amount to origin + as Mutate<_>>::mint_into(&caller, transferred_amount)?; + // verify initial balance + let balance = balance + transferred_amount; + assert_eq!( as Inspect<_>>::balance(&caller), balance); + let recipient = [0u8; 32]; let versioned_dest: VersionedLocation = destination.into(); let versioned_beneficiary: VersionedLocation = @@ -132,7 +150,7 @@ benchmarks! { }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) verify { // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + assert!( as Inspect<_>>::balance(&caller) <= balance - transferred_amount); } reserve_transfer_assets { @@ -146,23 +164,29 @@ benchmarks! { }.into(); let assets: Assets = asset.into(); - let existential_deposit = T::ExistentialDeposit::get(); - let caller = whitelisted_caller(); - - // Give some multiple of the existential deposit - let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - assert!(balance >= transferred_amount); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - // verify initial balance - assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); - + let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&(origin_location, assets.clone().into_inner())) { + if !T::XcmReserveTransferFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &origin_location, + &destination, + FeeReason::ChargeFees, + ); + + // Actual balance (e.g. 
`ensure_successful_delivery` could drip delivery fees, ...) + let balance = as Inspect<_>>::balance(&caller); + // Add transferred_amount to origin + as Mutate<_>>::mint_into(&caller, transferred_amount)?; + // verify initial balance + let balance = balance + transferred_amount; + assert_eq!( as Inspect<_>>::balance(&caller), balance); + let recipient = [0u8; 32]; let versioned_dest: VersionedLocation = destination.into(); let versioned_beneficiary: VersionedLocation = @@ -171,7 +195,7 @@ benchmarks! { }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) verify { // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + assert!( as Inspect<_>>::balance(&caller) <= balance - transferred_amount); } transfer_assets { @@ -324,11 +348,23 @@ benchmarks! { u32::MAX, ).unwrap()).collect::>(); crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); - }: { as QueryHandler>::take_response(query_id); } + claim_assets { + let claim_origin = RawOrigin::Signed(whitelisted_caller()); + let claim_location = T::ExecuteXcmOrigin::try_origin(claim_origin.clone().into()).map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + let asset: Asset = T::get_asset(); + // Trap assets for claiming later + crate::Pallet::::drop_assets( + &claim_location, + asset.clone().into(), + &XcmContext { origin: None, message_id: [0u8; 32], topic: None } + ); + let versioned_assets = VersionedAssets::V4(asset.into()); + }: _>(claim_origin.into(), Box::new(versioned_assets), Box::new(VersionedLocation::V4(claim_location))) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext_with_balances(Vec::new()), diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 5e1a3e55f9b62b67bb3cedb6a279a1c4c9046405..1a1f8b402a39041ffb5852dd56e9725a550afc7c 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -85,6 +85,7 @@ pub trait WeightInfo { fn migrate_and_notify_old_targets() -> Weight; fn new_query() -> Weight; fn take_response() -> Weight; + fn claim_assets() -> Weight; } /// fallback implementation @@ -165,6 +166,10 @@ impl WeightInfo for TestWeightInfo { fn take_response() -> Weight { Weight::from_parts(100_000_000, 0) } + + fn claim_assets() -> Weight { + Weight::from_parts(100_000_000, 0) + } } #[frame_support::pallet] @@ -1386,6 +1391,64 @@ pub mod pallet { weight_limit, ) } + + /// Claims assets trapped on this pallet because of leftover assets during XCM execution. + /// + /// - `origin`: Anyone can call this extrinsic. + /// - `assets`: The exact assets that were trapped. Use the version to specify what version + /// was the latest when they were trapped. + /// - `beneficiary`: The location/account where the claimed assets will be deposited. 
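+	///
+	/// A minimal sketch of a claim call (values are illustrative; it mirrors the
+	/// `claim_assets_works` test added in this change):
+	/// ```ignore
+	/// XcmPallet::claim_assets(
+	///     RuntimeOrigin::signed(ALICE),
+	///     Box::new(VersionedAssets::V4((Here, SEND_AMOUNT).into())),
+	///     Box::new(VersionedLocation::V4(
+	///         AccountId32 { network: None, id: ALICE.into() }.into(),
+	///     )),
+	/// );
+	/// ```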
+ #[pallet::call_index(12)] + #[pallet::weight({ + let assets_version = assets.identify_version(); + let maybe_assets: Result<Assets, ()> = (*assets.clone()).try_into(); + let maybe_beneficiary: Result<Location, ()> = (*beneficiary.clone()).try_into(); + match (maybe_assets, maybe_beneficiary) { + (Ok(assets), Ok(beneficiary)) => { + let ticket: Location = GeneralIndex(assets_version as u128).into(); + let mut message = Xcm(vec![ + ClaimAsset { assets: assets.clone(), ticket }, + DepositAsset { assets: AllCounted(assets.len() as u32).into(), beneficiary }, + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::claim_assets().saturating_add(w)) + } + _ => Weight::MAX + } + })] + pub fn claim_assets( + origin: OriginFor<T>, + assets: Box<VersionedAssets>, + beneficiary: Box<VersionedLocation>, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + log::debug!(target: "xcm::pallet_xcm::claim_assets", "origin: {:?}, assets: {:?}, beneficiary: {:?}", origin_location, assets, beneficiary); + // Extract version from `assets`. + let assets_version = assets.identify_version(); + let assets: Assets = (*assets).try_into().map_err(|()| Error::<T>::BadVersion)?; + let number_of_assets = assets.len() as u32; + let beneficiary: Location = + (*beneficiary).try_into().map_err(|()| Error::<T>::BadVersion)?; + let ticket: Location = GeneralIndex(assets_version as u128).into(); + let mut message = Xcm(vec![ + ClaimAsset { assets, ticket }, + DepositAsset { assets: AllCounted(number_of_assets).into(), beneficiary }, + ]); + let weight = + T::Weigher::weight(&mut message).map_err(|()| Error::<T>::UnweighableMessage)?; + let mut hash = message.using_encoded(sp_io::hashing::blake2_256); + let outcome = T::XcmExecutor::prepare_and_execute( + origin_location, + message, + &mut hash, + weight, + weight, + ); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::claim_assets", "XCM execution failed with error: {:?}", error); + Error::<T>::LocalExecutionIncomplete + })?; + Ok(()) + } } } diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 004a73ca6ab23ff0bcf59748ba477fce2fdb341d..d5ba7312a3a55dcddbc24c64e613985fca0c7bb9 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -172,7 +172,7 @@ impl SendXcm for TestSendXcm { msg: &mut Option<Xcm<()>>, ) -> SendResult<(Location, Xcm<()>)> { if FAIL_SEND_XCM.with(|q| *q.borrow()) { - return Err(SendError::Transport("Intentional send failure used in tests")) + return Err(SendError::Transport("Intentional send failure used in tests")); } let pair = (dest.take().unwrap(), msg.take().unwrap()); Ok((pair, Assets::new())) @@ -563,8 +563,30 @@ impl pallet_test_notifier::Config for Test { type RuntimeCall = RuntimeCall; } +#[cfg(feature = "runtime-benchmarks")] +pub struct TestDeliveryHelper; +#[cfg(feature = "runtime-benchmarks")] +impl xcm_builder::EnsureDelivery for TestDeliveryHelper { + fn ensure_successful_delivery( + origin_ref: &Location, + _dest: &Location, + _fee_reason: xcm_executor::traits::FeeReason, + ) -> (Option<FeesMode>, Option<Assets>) { + use xcm_executor::traits::ConvertLocation; + let account = SovereignAccountOf::convert_location(origin_ref).expect("Valid location"); + // Give at least the existential deposit + let balance = ExistentialDeposit::get(); + let _ = <Balances as frame_support::traits::Currency<_>>::make_free_balance_be( + &account, balance, + ); + (None, None) + } +} + #[cfg(feature = "runtime-benchmarks")] impl super::benchmarking::Config for Test { + type DeliveryHelper = TestDeliveryHelper; + fn reachable_dest() -> 
Option<Location> { Some(Parachain(1000).into()) } @@ -632,6 +654,10 @@ impl super::benchmarking::Config for Test { }); Some((assets, fee_index as u32, dest, verify)) } + + fn get_asset() -> Asset { + Asset { id: AssetId(Location::here()), fun: Fungible(ExistentialDeposit::get()) } + } } pub(crate) fn last_event() -> RuntimeEvent { diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 28c7d197443b070423bfb694593c6feb0d471534..13022d9a8b1f69f528393ee8ac16e3d62151bae9 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -467,6 +467,57 @@ fn trapped_assets_can_be_claimed() { }); } +// Like `trapped_assets_can_be_claimed` but using the `claim_assets` extrinsic. +#[test] +fn claim_assets_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + // First trap some assets. + let trapping_program = + Xcm::builder_unsafe().withdraw_asset((Here, SEND_AMOUNT).into()).build(); + // Even though assets are trapped, the extrinsic returns success. + assert_ok!(XcmPallet::execute( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedXcm::V4(trapping_program)), + BaseXcmWeight::get() * 2, + )); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + + // Expected `AssetsTrapped` event info. + let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + let versioned_assets = VersionedAssets::V4(Assets::from((Here, SEND_AMOUNT))); + let hash = BlakeTwo256::hash_of(&(source.clone(), versioned_assets.clone())); + + // Assets were indeed trapped. + assert_eq!( + last_events(2), + vec![ + RuntimeEvent::XcmPallet(crate::Event::AssetsTrapped { + hash, + origin: source, + assets: versioned_assets + }), + RuntimeEvent::XcmPallet(crate::Event::Attempted { + outcome: Outcome::Complete { used: BaseXcmWeight::get() * 1 } + }) + ], + ); + let trapped = AssetTraps::<Test>::iter().collect::<Vec<_>>(); + assert_eq!(trapped, vec![(hash, 1)]); + + // Now claim them with the extrinsic. + assert_ok!(XcmPallet::claim_assets( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedAssets::V4((Here, SEND_AMOUNT).into())), + Box::new(VersionedLocation::V4( + AccountId32 { network: None, id: ALICE.clone().into() }.into() + )), + )); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_eq!(AssetTraps::<Test>::iter().collect::<Vec<_>>(), vec![]); + }); +} + /// Test failure to complete execution reverts intermediate side-effects. /// /// XCM program will withdraw and deposit some assets, then fail execution of a further withdraw. diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 1af4d2db4f158969290b3fd1b98d3bf124236ca5..ca9fb351bd3cad1f805106e405cfdcc496c9d8a8 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -15,8 +15,8 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = "2.0.49" +quote = { workspace = true } +syn = { workspace = true } Inflector = "0.11.4" [dev-dependencies] diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 561d5175b419376449759a92e61e0a2f12ae0294..ba8d726aecff4989417373fc9e14c174d5a5277f 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -165,6 +165,15 @@ macro_rules! 
versioned_type { <$v3>::max_encoded_len() } } + impl IdentifyVersion for $n { + fn identify_version(&self) -> Version { + use $n::*; + match self { + V3(_) => v3::VERSION, + V4(_) => v4::VERSION, + } + } + } }; ($(#[$attr:meta])* pub enum $n:ident { @@ -287,6 +296,16 @@ macro_rules! versioned_type { <$v3>::max_encoded_len() } } + impl IdentifyVersion for $n { + fn identify_version(&self) -> Version { + use $n::*; + match self { + V2(_) => v2::VERSION, + V3(_) => v3::VERSION, + V4(_) => v4::VERSION, + } + } + } }; } @@ -493,8 +512,14 @@ pub trait WrapVersion { ) -> Result, ()>; } +/// Used to get the version out of a versioned type. +// TODO(XCMv5): This could be `GetVersion` and we change the current one to `GetVersionFor`. +pub trait IdentifyVersion { + fn identify_version(&self) -> Version; +} + /// Check and return the `Version` that should be used for the `Xcm` datum for the destination -/// `MultiLocation`, which will interpret it. +/// `Location`, which will interpret it. pub trait GetVersion { fn get_version_for(dest: &latest::Location) -> Option; } @@ -572,9 +597,9 @@ pub type AlwaysLts = AlwaysV4; pub mod prelude { pub use super::{ latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV2, AlwaysV3, AlwaysV4, GetVersion, - IntoVersion, Unsupported, Version as XcmVersion, VersionedAsset, VersionedAssetId, - VersionedAssets, VersionedInteriorLocation, VersionedLocation, VersionedResponse, - VersionedXcm, WrapVersion, + IdentifyVersion, IntoVersion, Unsupported, Version as XcmVersion, VersionedAsset, + VersionedAssetId, VersionedAssets, VersionedInteriorLocation, VersionedLocation, + VersionedResponse, VersionedXcm, WrapVersion, }; } diff --git a/polkadot/xcm/xcm-builder/src/currency_adapter.rs b/polkadot/xcm/xcm-builder/src/currency_adapter.rs index fe26b7319bb18b2d4ad33774d389afd0f834eb50..24261ac06583c4ca284f52071a4ee80afae2a6ec 100644 --- a/polkadot/xcm/xcm-builder/src/currency_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/currency_adapter.rs @@ -170,7 +170,7 @@ impl< } fn can_check_out(_dest: &Location, what: &Asset, _context: &XcmContext) -> Result { - log::trace!(target: "xcm::currency_adapter", "check_out dest: {:?}, what: {:?}", _dest, what); + log::trace!(target: "xcm::currency_adapter", "can_check_out dest: {:?}, what: {:?}", _dest, what); let amount = Matcher::matches_fungible(what).ok_or(Error::AssetNotHandled)?; match CheckedAccount::get() { Some((checked_account, MintLocation::Local)) => diff --git a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs index 33b766c8560cef798b61d564f1910ff77b2c10eb..21c828922b333e85e9332d16c768aef0d34cbd9f 100644 --- a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs @@ -151,7 +151,7 @@ impl< fn can_check_out(_dest: &Location, what: &Asset, _context: &XcmContext) -> XcmResult { log::trace!( target: "xcm::fungible_adapter", - "check_out dest: {:?}, what: {:?}", + "can_check_out dest: {:?}, what: {:?}", _dest, what ); diff --git a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs index 4574d5ed4c682c7bb62b6f20b6f9397dc37e4098..b4c418ebf1c9cf7d9d26540e998f966665993585 100644 --- a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs @@ -235,7 +235,7 @@ impl< fn can_check_out(_origin: &Location, what: &Asset, _context: &XcmContext) -> XcmResult { log::trace!( target: "xcm::fungibles_adapter", - "can_check_in origin: {:?}, what: {:?}", + 
"can_check_out origin: {:?}, what: {:?}", _origin, what ); // Check we handle this asset. diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 42522c64d8a4da77cc676e6a338d9f019ef707a5..e2af8187136e8eba2b42d7d4a667a69830556032 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -117,7 +117,7 @@ mod process_xcm_message; pub use process_xcm_message::ProcessXcmMessage; mod routing; -pub use routing::{WithTopicSource, WithUniqueTopic}; +pub use routing::{EnsureDelivery, WithTopicSource, WithUniqueTopic}; mod transactional; pub use transactional::FrameTransactionalProcessor; diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 9c0302baee06b81ec662bc3a0e25eb9308fc0a3a..529ef80c15ff11d3c0d2629aeb4fa9506cb37a28 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -20,6 +20,7 @@ use frame_system::unique; use parity_scale_codec::Encode; use sp_std::{marker::PhantomData, result::Result}; use xcm::prelude::*; +use xcm_executor::{traits::FeeReason, FeesMode}; /// Wrapper router which, if the message does not already end with a `SetTopic` instruction, /// appends one to the message filled with a universally unique ID. This ID is returned from a @@ -104,3 +105,37 @@ impl SendXcm for WithTopicSource (Option, Option); +} + +/// Tuple implementation for `EnsureDelivery`. +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl EnsureDelivery for Tuple { + fn ensure_successful_delivery( + origin_ref: &Location, + dest: &Location, + fee_reason: FeeReason, + ) -> (Option, Option) { + for_tuples!( #( + // If the implementation returns something, we're done; if not, let others try. + match Tuple::ensure_successful_delivery(origin_ref, dest, fee_reason.clone()) { + r @ (Some(_), Some(_)) | r @ (Some(_), None) | r @ (None, Some(_)) => return r, + (None, None) => (), + } + )* ); + // doing nothing + (None, None) + } +} diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index cafe12dc587f883a31ac3539aced38e8de29a89e..1e572e6210a27a7469f827bbe5d076c01c84f032 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -20,6 +20,7 @@ pallet-xcm = { path = "../../pallet-xcm" } polkadot-test-client = { path = "../../../node/test/client" } polkadot-test-runtime = { path = "../../../runtime/test-runtime" } polkadot-test-service = { path = "../../../node/test/service" } +polkadot-service = { path = "../../../node/service" } sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } @@ -27,6 +28,7 @@ sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } xcm = { package = "staging-xcm", path = "../..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = ".." 
} sp-tracing = { path = "../../../../substrate/primitives/tracing" } +sp-core = { path = "../../../../substrate/primitives/core" } [features] default = ["std"] diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index 79d6cb1c411b12da2a9f6b81a01b93a44c97ebe9..da7fc0d97825f24e1eea91e441cecef11a136e5f 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -19,12 +19,14 @@ use codec::Encode; use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; +use polkadot_service::chain_spec::get_account_id_from_seed; use polkadot_test_client::{ BlockBuilderExt, ClientBlockImportExt, DefaultTestClientBuilderExt, InitPolkadotBlockBuilder, TestClientBuilder, TestClientBuilderExt, }; use polkadot_test_runtime::{pallet_test_notifier, xcm_config::XcmConfig}; use polkadot_test_service::construct_extrinsic; +use sp_core::sr25519; use sp_runtime::traits::Block; use sp_state_machine::InspectState; use xcm::{latest::prelude::*, VersionedResponse, VersionedXcm}; @@ -323,3 +325,84 @@ fn query_response_elicits_handler() { ))); }); } + +/// Simulates a cross-chain message from Parachain to Parachain through Relay Chain +/// that deposits assets into the reserve of the destination. +/// Regression test for `DepositReserveAsset` changes in +/// +#[test] +fn deposit_reserve_asset_works_for_any_xcm_sender() { + sp_tracing::try_init_simple(); + let mut client = TestClientBuilder::new().build(); + + // Init values for the simulated origin Parachain + let amount_to_send: u128 = 1_000_000_000_000; + let assets: Assets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + let max_assets = assets.len() as u32; + let fees = assets.get(fee_asset_item as usize).unwrap().clone(); + let weight_limit = Unlimited; + let reserve = Location::parent(); + let dest = Location::new(1, [Parachain(2000)]); + let beneficiary_id = get_account_id_from_seed::<sr25519::Public>("Alice"); + let beneficiary = Location::new(0, [AccountId32 { network: None, id: beneficiary_id.into() }]); + + // spends up to half of fees for execution on reserve and the other half for execution on + // destination + let fee1 = amount_to_send.saturating_div(2); + let fee2 = amount_to_send.saturating_sub(fee1); + let fees_half_1 = Asset::from((fees.id.clone(), Fungible(fee1))); + let fees_half_2 = Asset::from((fees.id.clone(), Fungible(fee2))); + + let reserve_context = <XcmConfig as xcm_executor::Config>::UniversalLocation::get(); + // identifies fee item as seen by `reserve` - to be used at reserve chain + let reserve_fees = fees_half_1.reanchored(&reserve, &reserve_context).unwrap(); + // identifies fee item as seen by `dest` - to be used at destination chain + let dest_fees = fees_half_2.reanchored(&dest, &reserve_context).unwrap(); + // identifies assets as seen by `reserve` - to be used at reserve chain + let assets_reanchored = assets.reanchored(&reserve, &reserve_context).unwrap(); + // identifies `dest` as seen by `reserve` + let dest = dest.reanchored(&reserve, &reserve_context).unwrap(); + // xcm to be executed at dest + let xcm_on_dest = Xcm(vec![ + BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }, + DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, + ]); + // xcm to be executed at reserve + let msg = Xcm(vec![ + WithdrawAsset(assets_reanchored), + ClearOrigin, + BuyExecution { fees: reserve_fees, weight_limit }, + DepositReserveAsset { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, + ]); + + let 
+	let mut block_builder = client.init_polkadot_block_builder();
+
+	// Simulate execution of an incoming XCM message at the reserve chain
+	let execute = construct_extrinsic(
+		&client,
+		polkadot_test_runtime::RuntimeCall::Xcm(pallet_xcm::Call::execute {
+			message: Box::new(VersionedXcm::from(msg)),
+			max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024),
+		}),
+		sp_keyring::Sr25519Keyring::Alice,
+		0,
+	);
+
+	block_builder.push_polkadot_extrinsic(execute).expect("pushes extrinsic");
+
+	let block = block_builder.build().expect("Finalizes the block").block;
+	let block_hash = block.hash();
+
+	futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block))
+		.expect("imports the block");
+
+	client.state_at(block_hash).expect("state should exist").inspect_state(|| {
+		assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!(
+			r.event,
+			polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted {
+				outcome: Outcome::Complete { .. }
+			}),
+		)));
+	});
+}
diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs
index b26779f3ae9da206d76823027dc193dc771cc215..c61e1e1d15bcd8fd78ffe47c415abeaf8c1cb4cd 100644
--- a/polkadot/xcm/xcm-executor/src/lib.rs
+++ b/polkadot/xcm/xcm-executor/src/lib.rs
@@ -826,9 +826,9 @@ impl<Config: config::Config> XcmExecutor<Config> {
 				// be weighed
 				let to_weigh = self.holding.saturating_take(assets.clone());
 				self.holding.subsume_assets(to_weigh.clone());
-
+				let to_weigh_reanchored = Self::reanchored(to_weigh, &dest, None);
 				let mut message_to_weigh =
-					vec![ReserveAssetDeposited(to_weigh.into()), ClearOrigin];
+					vec![ReserveAssetDeposited(to_weigh_reanchored), ClearOrigin];
 				message_to_weigh.extend(xcm.0.clone().into_iter());
 				let (_, fee) = validate_send::<Config::XcmSender>(dest.clone(), Xcm(message_to_weigh))?;
diff --git a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml
index f6bdfeb4877e1dbd5f293f8c0ec864f4f7f1b80d..2561661de1f8408b2ed2808f1f5e41b4286ce97e 100644
--- a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml
+++ b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml
@@ -2,9 +2,11 @@
 timeout = 1000
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  max_validators_per_core = 5
   needed_approvals = 8
 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 5
+
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params]
   max_approval_coalesce_count = 5
diff --git a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml
index 5d6f299d46133e52c645bd128fbdcc8171490d4e..a2a2621f8426d2e8deb32f524d23e666ab1b5c00 100644
--- a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml
+++ b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml
@@ -2,8 +2,10 @@
 timeout = 1000
 bootnode = true
 
-[relaychain.genesis.runtimeGenesis.patch.configuration.config]
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
   max_validators_per_core = 1
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config]
   needed_approvals = 2
 
 [relaychain]
diff --git a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml
index e2fbec079b1a81ec50054a30ef89d049bb717482..a3bbc82e74ba6d9d5c442742afc2dd18f31c7305 100644
--- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml
+++ b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml
@@ -3,8 +3,10 @@ timeout = 1000
 bootnode = true
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  max_validators_per_core = 1
   needed_approvals = 2
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 1
   group_rotation_frequency = 2
 
 [relaychain]
diff --git a/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml
index bef54cb8ca416fb3210849fb9801c44a31e846a4..858f87b9cfe52c10c9cff9885dbeb57b0dcba2f5 100644
--- a/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml
+++ b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml
@@ -3,10 +3,12 @@ timeout = 1000
 bootnode = true
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  max_validators_per_core = 1
   needed_approvals = 7
   relay_vrf_modulo_samples = 5
 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 1
+
 [relaychain]
 default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
 chain = "rococo-local"
diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml
index 69eb0804d8cb70c58fbc70abdb091af3a197d2fb..573ccf961385fa2c721caa0bb67ed97b94deb5a1 100644
--- a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml
+++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml
@@ -2,9 +2,11 @@
 timeout = 1000
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  max_validators_per_core = 1
   needed_approvals = 1
 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 1
+
 [relaychain]
 default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
 chain = "rococo-local"
diff --git a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml
index 1ea385c3a42ee8fdf89b7595151a98c26d9b011b..ea1c93a1403fb837b45a6da01d0920c516e43ae6 100644
--- a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml
+++ b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml
@@ -2,9 +2,11 @@
 timeout = 1000
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  max_validators_per_core = 1
   needed_approvals = 1
 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 1
+
 [relaychain]
 default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
 chain = "rococo-local"
diff --git a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml
index 6701d60d74d147d517fc55dd3f1253171f6b807f..c9d79c5f8f236918cf409fd684b7d0b8d9792d33 100644
--- a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml
+++ b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml
@@ -3,8 +3,10 @@ timeout = 1000
 bootnode = true
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  max_validators_per_core = 1
   needed_approvals = 2
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 1
   group_rotation_frequency = 10
 
 [relaychain]
diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml
index 5a6832b149be21a48ca17fe6e84f75cfc3324ed0..b776622fdce33df2e9a78debc31ee3e62ae4805d 100644
--- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml
+++ b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml
@@ -8,13 +8,16 @@ chain = "rococo-local"
 [relaychain.genesis.runtimeGenesis.patch.configuration.config]
   needed_approvals = 4
   relay_vrf_modulo_samples = 6
-  scheduling_lookahead = 2
-  group_rotation_frequency = 4
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
   max_candidate_depth = 3
   allowed_ancestry_len = 2
 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  lookahead = 2
+  group_rotation_frequency = 4
+
+
 [relaychain.default_resources]
 limits = { memory = "4G", cpu = "2" }
 requests = { memory = "2G", cpu = "1" }
diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml
new file mode 100644
index 0000000000000000000000000000000000000000..9b3576eaa3c212bd9490095c12ae4bca65f6fa54
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml
@@ -0,0 +1,40 @@
+[settings]
+timeout = 1000
+bootnode = true
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config]
+  needed_approvals = 4
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 2
+  num_cores = 2
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+default_command = "polkadot"
+
+[relaychain.default_resources]
+limits = { memory = "4G", cpu = "2" }
+requests = { memory = "2G", cpu = "1" }
+
+  [[relaychain.nodes]]
+  name = "alice"
+  validator = "true"
+
+  [[relaychain.node_groups]]
+  name = "validator"
+  count = 3
+  args = [ "-lparachain=debug,runtime=debug"]
+
+[[parachains]]
+id = 2000
+default_command = "polkadot-parachain"
+add_to_genesis = false
+register_para = true
+onboard_as_parachain = false
+
+  [parachains.collator]
+  name = "collator2000"
+  command = "polkadot-parachain"
+  args = [ "-lparachain=debug" ]
diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..edc87c802a0965b6fd685db44ccbf50e807f1ed3
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl
@@ -0,0 +1,19 @@
+Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled in genesis
+Network: ./0012-elastic-scaling-mvp.toml
+Creds: config
+
+# Check authority status.
+validator: reports node_roles is 4
+
+validator: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds
+
+# Ensure parachain was able to make progress.
+validator: parachain 2000 block height is at least 10 within 200 seconds
+
+# Register the second core assigned to this parachain.
+alice: js-script ./0012-register-para.js return is 0 within 600 seconds
+
+validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds
+
+# Ensure parachain is now making progress.
+validator: parachain 2000 block height is at least 30 within 200 seconds
diff --git a/polkadot/zombienet_tests/functional/0012-register-para.js b/polkadot/zombienet_tests/functional/0012-register-para.js
new file mode 100644
index 0000000000000000000000000000000000000000..25c7e4f5ffddcf087edab4df37e696af92fe6d3f
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0012-register-para.js
@@ -0,0 +1,37 @@
+async function run(nodeName, networkInfo, _jsArgs) {
+  const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName];
+  const api = await zombie.connect(wsUri, userDefinedTypes);
+
+  await zombie.util.cryptoWaitReady();
+
+  // account to submit tx
+  const keyring = new zombie.Keyring({ type: "sr25519" });
+  const alice = keyring.addFromUri("//Alice");
+
+  await new Promise(async (resolve, reject) => {
+    const unsub = await api.tx.sudo
+      .sudo(api.tx.coretime.assignCore(0, 35, [[{ task: 2000 }, 57600]], null))
+      .signAndSend(alice, ({ status, isError }) => {
+        if (status.isInBlock) {
+          console.log(
+            `Transaction included at blockhash ${status.asInBlock}`,
+          );
+        } else if (status.isFinalized) {
+          console.log(
+            `Transaction finalized at blockHash ${status.asFinalized}`,
+          );
+          unsub();
+          return resolve();
+        } else if (isError) {
+          console.log(`Transaction error`);
+          reject(`Transaction error`);
+        }
+      });
+  });
+
+  return 0;
+}
+
+module.exports = { run };
diff --git a/polkadot/zombienet_tests/misc/0001-paritydb.toml b/polkadot/zombienet_tests/misc/0001-paritydb.toml
index 399f848d3ac49e7e9950617c170c13d5c63593dd..b3ce2081b1119ec237c1b3d6e14e1e4aed8322c5 100644
--- a/polkadot/zombienet_tests/misc/0001-paritydb.toml
+++ b/polkadot/zombienet_tests/misc/0001-paritydb.toml
@@ -3,9 +3,11 @@ timeout = 1000
 bootnode = true
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  max_validators_per_core = 1
   needed_approvals = 3
 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 1
+
 [relaychain]
 default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
 chain = "rococo-local"
diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl
index db0a60ac1df617e5c89dc6a1385c4c106c1ead05..5fe1b2ad2f1a7589a28f8b1b8da05d6f35d3d59b 100644
--- a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl
+++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl
@@ -10,9 +10,8 @@ dave: parachain 2001 block height is at least 10 within 200 seconds
 # POLKADOT_PR_ARTIFACTS_URL=https://gitlab.parity.io/parity/mirrors/polkadot/-/jobs/1842869/artifacts/raw/artifacts
 # with the version of polkadot you want to download.
 
-# avg 30s in our infra
-alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds
-bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds
+alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 240 seconds
+bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 240 seconds
 
 # update the cmd to add the flag '--insecure-validator-i-know-what-i-do'
 # once the base image include the version with this flag we can remove this logic.
 alice: run ./0002-update-cmd.sh within 60 seconds
diff --git a/prdoc/1.4.0/pr_1246.prdoc b/prdoc/1.4.0/pr_1246.prdoc
index a4d270c45cb5915760ddfbd60e5e4b3b7c08cd4a..3b5c2017f22acfba25471595c50015ae14c3e5cd 100644
--- a/prdoc/1.4.0/pr_1246.prdoc
+++ b/prdoc/1.4.0/pr_1246.prdoc
@@ -11,7 +11,7 @@ migrations:
     description: "Messages from the DMP dispatch queue will be moved over to the MQ pallet via `on_initialize`. This happens over multiple blocks and emits a `Completed` event at the end. The pallet can be un-deployed and deleted afterwards. Note that the migration reverses the order of messages, which should be acceptable as a one-off."
 
 crates:
-  - name: cumulus_pallet_xcmp_queue
+  - name: cumulus-pallet-xcmp-queue
     note: Pallet config must be altered according to the MR description.
 
 host_functions: []
diff --git a/prdoc/1.6.0/pr_2689.prdoc b/prdoc/1.6.0/pr_2689.prdoc
index 847c3e8026cef9dfdf63f044a1b773be85b12921..5d3081e3a4ce71d872c8eb8a96fda5b2e273b684 100644
--- a/prdoc/1.6.0/pr_2689.prdoc
+++ b/prdoc/1.6.0/pr_2689.prdoc
@@ -1,7 +1,7 @@
 # Schema: Parity PR Documentation Schema (prdoc)
 # See doc at https://github.com/paritytech/prdoc
 
-title: BEEFY: Support compatibility with Warp Sync - Allow Warp Sync for Validators
+title: "BEEFY: Support compatibility with Warp Sync - Allow Warp Sync for Validators"
 
 doc:
   - audience: Node Operator
diff --git a/prdoc/1.6.0/pr_2771.prdoc b/prdoc/1.6.0/pr_2771.prdoc
index 1b49162e4392ba1ad1a77d61e5b2289474b0ffbe..50fb99556ecddf39adbcece77d9b83e5d98651b4 100644
--- a/prdoc/1.6.0/pr_2771.prdoc
+++ b/prdoc/1.6.0/pr_2771.prdoc
@@ -6,4 +6,4 @@ doc:
       Enable better req-response protocol versioning, by allowing for fallback
       requests on different protocols.
 crates:
-  - name: sc_network
+  - name: sc-network
diff --git a/prdoc/pr_1660.prdoc b/prdoc/1.8.0/pr_1660.prdoc
similarity index 100%
rename from prdoc/pr_1660.prdoc
rename to prdoc/1.8.0/pr_1660.prdoc
diff --git a/prdoc/pr_2061.prdoc b/prdoc/1.8.0/pr_2061.prdoc
similarity index 100%
rename from prdoc/pr_2061.prdoc
rename to prdoc/1.8.0/pr_2061.prdoc
diff --git a/prdoc/pr_2290.prdoc b/prdoc/1.8.0/pr_2290.prdoc
similarity index 100%
rename from prdoc/pr_2290.prdoc
rename to prdoc/1.8.0/pr_2290.prdoc
diff --git a/prdoc/1.8.0/pr_2903.prdoc b/prdoc/1.8.0/pr_2903.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..dd22fa0eea3754436e1dca0a9657eb1be8fb8151
--- /dev/null
+++ b/prdoc/1.8.0/pr_2903.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Implement `ConversionToAssetBalance` in asset-rate"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Implements the `ConversionToAssetBalance` trait for the asset-rate pallet.
+
+      Previously only the `ConversionFromAssetBalance` trait was implemented, which allows converting an asset balance into the corresponding native balance.
+
+      `ConversionToAssetBalance` allows pallet-asset-rate to be used, e.g., as a mechanism to charge XCM fees in an asset other than the native one.
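A minimal sketch of the new conversion direction, written generically so that it only assumes the trait bounds (pallet-asset-rate's `Pallet` is one implementor after this change):

```rust
use frame_support::traits::tokens::ConversionToAssetBalance;

// Convert a native fee into an asset-denominated fee through any converter
// implementing the trait; errors (e.g. no rate created for the asset) map to None.
fn fee_in_asset<C, AssetId>(native_fee: u128, asset_id: AssetId) -> Option<u128>
where
	C: ConversionToAssetBalance<u128, AssetId, u128>,
{
	C::to_asset_balance(native_fee, asset_id).ok()
}
```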
+crates: [ ]
+
diff --git a/prdoc/pr_2949-async-backing-on-all-testnet-system-chains.prdoc b/prdoc/1.8.0/pr_2949-async-backing-on-all-testnet-system-chains.prdoc
similarity index 100%
rename from prdoc/pr_2949-async-backing-on-all-testnet-system-chains.prdoc
rename to prdoc/1.8.0/pr_2949-async-backing-on-all-testnet-system-chains.prdoc
diff --git a/prdoc/pr_3007.prdoc b/prdoc/1.8.0/pr_3007.prdoc
similarity index 100%
rename from prdoc/pr_3007.prdoc
rename to prdoc/1.8.0/pr_3007.prdoc
diff --git a/prdoc/pr_3052.prdoc b/prdoc/1.8.0/pr_3052.prdoc
similarity index 100%
rename from prdoc/pr_3052.prdoc
rename to prdoc/1.8.0/pr_3052.prdoc
diff --git a/prdoc/pr_3060.prdoc b/prdoc/1.8.0/pr_3060.prdoc
similarity index 100%
rename from prdoc/pr_3060.prdoc
rename to prdoc/1.8.0/pr_3060.prdoc
diff --git a/prdoc/pr_3079.prdoc b/prdoc/1.8.0/pr_3079.prdoc
similarity index 100%
rename from prdoc/pr_3079.prdoc
rename to prdoc/1.8.0/pr_3079.prdoc
diff --git a/prdoc/pr_3154.prdoc b/prdoc/1.8.0/pr_3154.prdoc
similarity index 100%
rename from prdoc/pr_3154.prdoc
rename to prdoc/1.8.0/pr_3154.prdoc
diff --git a/prdoc/pr_3160.prdoc b/prdoc/1.8.0/pr_3160.prdoc
similarity index 100%
rename from prdoc/pr_3160.prdoc
rename to prdoc/1.8.0/pr_3160.prdoc
diff --git a/prdoc/pr_3166.prdoc b/prdoc/1.8.0/pr_3166.prdoc
similarity index 100%
rename from prdoc/pr_3166.prdoc
rename to prdoc/1.8.0/pr_3166.prdoc
diff --git a/prdoc/pr_3184.prdoc b/prdoc/1.8.0/pr_3184.prdoc
similarity index 100%
rename from prdoc/pr_3184.prdoc
rename to prdoc/1.8.0/pr_3184.prdoc
diff --git a/prdoc/pr_3212.prdoc b/prdoc/1.8.0/pr_3212.prdoc
similarity index 100%
rename from prdoc/pr_3212.prdoc
rename to prdoc/1.8.0/pr_3212.prdoc
diff --git a/prdoc/pr_3225.prdoc b/prdoc/1.8.0/pr_3225.prdoc
similarity index 100%
rename from prdoc/pr_3225.prdoc
rename to prdoc/1.8.0/pr_3225.prdoc
diff --git a/prdoc/pr_3230.prdoc b/prdoc/1.8.0/pr_3230.prdoc
similarity index 100%
rename from prdoc/pr_3230.prdoc
rename to prdoc/1.8.0/pr_3230.prdoc
diff --git a/prdoc/pr_3232.prdoc b/prdoc/1.8.0/pr_3232.prdoc
similarity index 100%
rename from prdoc/pr_3232.prdoc
rename to prdoc/1.8.0/pr_3232.prdoc
diff --git a/prdoc/pr_3243.prdoc b/prdoc/1.8.0/pr_3243.prdoc
similarity index 100%
rename from prdoc/pr_3243.prdoc
rename to prdoc/1.8.0/pr_3243.prdoc
diff --git a/prdoc/pr_3244.prdoc b/prdoc/1.8.0/pr_3244.prdoc
similarity index 100%
rename from prdoc/pr_3244.prdoc
rename to prdoc/1.8.0/pr_3244.prdoc
diff --git a/prdoc/pr_3272.prdoc b/prdoc/1.8.0/pr_3272.prdoc
similarity index 100%
rename from prdoc/pr_3272.prdoc
rename to prdoc/1.8.0/pr_3272.prdoc
diff --git a/prdoc/pr_3301.prdoc b/prdoc/1.8.0/pr_3301.prdoc
similarity index 100%
rename from prdoc/pr_3301.prdoc
rename to prdoc/1.8.0/pr_3301.prdoc
diff --git a/prdoc/pr_3308.prdoc b/prdoc/1.8.0/pr_3308.prdoc
similarity index 100%
rename from prdoc/pr_3308.prdoc
rename to prdoc/1.8.0/pr_3308.prdoc
diff --git a/prdoc/pr_3319.prdoc b/prdoc/1.8.0/pr_3319.prdoc
similarity index 100%
rename from prdoc/pr_3319.prdoc
rename to prdoc/1.8.0/pr_3319.prdoc
diff --git a/prdoc/pr_3325.prdoc b/prdoc/1.8.0/pr_3325.prdoc
similarity index 100%
rename from prdoc/pr_3325.prdoc
rename to prdoc/1.8.0/pr_3325.prdoc
diff --git a/prdoc/pr_3358.prdoc b/prdoc/1.8.0/pr_3358.prdoc
similarity index 100%
rename from prdoc/pr_3358.prdoc
rename to prdoc/1.8.0/pr_3358.prdoc
diff --git a/prdoc/1.8.0/pr_3361.prdoc b/prdoc/1.8.0/pr_3361.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..65baa9e94a0e38a4af16bf6f69aab7526c4ad920
--- /dev/null
+++ b/prdoc/1.8.0/pr_3361.prdoc
@@ -0,0 +1,10 @@
+title: Fix double charge of host function weight
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Fixed a double charge which can lead to quadratic gas consumption of
+      the `call` and `instantiate` host functions.
+
+crates:
+  - name: pallet-contracts
diff --git a/prdoc/1.8.0/pr_3364.prdoc b/prdoc/1.8.0/pr_3364.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1e7a6a5278d730a71a0bf5a928815812d8576386
--- /dev/null
+++ b/prdoc/1.8.0/pr_3364.prdoc
@@ -0,0 +1,12 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: rpc server expose batch request configuration
+
+doc:
+  - audience: Node Operator
+    description: |
+      Add functionality to limit RPC batch requests via two new CLI options:
+      --rpc-disable-batch-request - disable batch requests on the server
+      --rpc-max-batch-request-len - limit batches to LEN on the server
+crates: [ ]
diff --git a/prdoc/1.8.0/pr_3370.prdoc b/prdoc/1.8.0/pr_3370.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..f40d3821b9fed3eae49803128a9ff61058cd5d02
--- /dev/null
+++ b/prdoc/1.8.0/pr_3370.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Remove `key` getter from pallet-sudo"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Removed the `key` getter function from the sudo pallet. There is no replacement for getting
+      the key currently.
+
+crates:
+  - name: pallet-sudo
diff --git a/prdoc/1.8.0/pr_3384.prdoc b/prdoc/1.8.0/pr_3384.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..c19464977582a37b11760c81491a7ea7cf3ebced
--- /dev/null
+++ b/prdoc/1.8.0/pr_3384.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "[pallet_contracts] stabilize `call_v2`, `instantiate_v2`, `lock_dependency` and `unlock_dependency`"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      These APIs were previously unstable and are being stabilized in this PR.
+      Note: `add_delegate_dependency` and `remove_delegate_dependency` have been renamed to `lock_dependency` and `unlock_dependency` respectively.
+
+crates:
+  - name: pallet-contracts-uapi
+  - name: pallet-contracts
diff --git a/prdoc/1.8.0/pr_3395.prdoc b/prdoc/1.8.0/pr_3395.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a10fb84fd7f56211602fadaf656a25ea5d382255
--- /dev/null
+++ b/prdoc/1.8.0/pr_3395.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "`benchmarking-cli` `pallet` subcommand: refactor `--list` and add `--all` option"
+
+doc:
+  - audience: Node Dev
+    description: |
+      The `pallet` subcommand's `--list` now accepts two values: "all" and "pallets". The former will list all available benchmarks, the latter will list only pallets.
+      Also adds `--all` to run all the available benchmarks and `--no-csv-header` to omit the csv-style header in the output.
+      NOTE: changes are backward compatible.
+
+crates:
+  - name: frame-benchmarking-cli
diff --git a/prdoc/1.8.0/pr_3415.prdoc b/prdoc/1.8.0/pr_3415.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..c56a5d3ffaa129d4aba36e1e11905e9d72b936cb
--- /dev/null
+++ b/prdoc/1.8.0/pr_3415.prdoc
@@ -0,0 +1,9 @@
+title: "[pallet-contracts] Add APIVersion to the config."
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Add `APIVersion` to the config to communicate the state of the Host functions exposed by the pallet.
+
+crates:
+  - name: pallet-contracts
diff --git a/prdoc/1.8.0/pr_3435.prdoc b/prdoc/1.8.0/pr_3435.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..7d7896bff7d405a0a1dd34f09bc2aff0834b9ff2
--- /dev/null
+++ b/prdoc/1.8.0/pr_3435.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Fix BEEFY-related gossip messages error logs
+
+doc:
+  - audience: Node Operator
+    description: |
+      Added logic to pump the gossip engine while waiting for other things
+      to make sure gossiped messages get consumed (practically discarded
+      until the worker is fully initialized).
+      This fixes an issue where node operators saw error logs, and fixes
+      potential RAM bloat when BEEFY initialization takes a long time
+      (e.g. during a clean sync).
+crates:
+  - name: sc-consensus-beefy
diff --git a/prdoc/1.8.0/pr_3477.prdoc b/prdoc/1.8.0/pr_3477.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..26e96d3635b14634ca523797d741ddaf224aa9ae
--- /dev/null
+++ b/prdoc/1.8.0/pr_3477.prdoc
@@ -0,0 +1,11 @@
+title: Allow parachain which acquires multiple coretime cores to make progress
+
+doc:
+  - audience: Node Operator
+    description: |
+      Adds the needed changes so that parachains which acquire multiple coretime cores can still make progress.
+      Only one of the cores will be able to be occupied at a time.
+      Only works if the ElasticScalingMVP node feature is enabled in the runtime and the block author validator is
+      updated to include this change.
+
+crates: [ ]
diff --git a/prdoc/pr_1378.prdoc b/prdoc/pr_1378.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..6533dcb663030718fdcd3ffc353b2ae8b2a92407
--- /dev/null
+++ b/prdoc/pr_1378.prdoc
@@ -0,0 +1,27 @@
+title: Construct Runtime V2 - An outer macro approach to define the runtime
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Introduces `#[frame_support::runtime]` that can be attached to a mod to define a runtime. The
+      following attributes can be attached to items in this mod to define the key components of the runtime.
+      1. `#[runtime::runtime]` attached to a struct defines the main runtime
+      2. `#[runtime::derive]` attached to the runtime struct defines the types generated by the runtime
+      3. `#[runtime::pallet_index]` must be attached to a pallet to define its index
+      4. `#[runtime::disable_call]` can be optionally attached to a pallet to disable its calls
+      5. `#[runtime::disable_unsigned]` can be optionally attached to a pallet to disable unsigned calls
+      6. A pallet instance can be defined as `TemplateModule: pallet_template`
+      An optional attribute can be defined as `#[frame_support::runtime(legacy_ordering)]` to ensure that
+      the order of hooks is the same as the order of pallets (and not based on the pallet_index). This is to support
+      legacy runtimes and should be avoided for new ones.
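To make the attribute list concrete, a hedged sketch of the new syntax (the pallet set, indices, and derive list below are illustrative, not prescribed by the PR):

```rust
#[frame_support::runtime]
mod runtime {
	// 1. the main runtime struct
	#[runtime::runtime]
	// 2. the types the macro should generate for the runtime
	#[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeError, RuntimeOrigin)]
	pub struct Runtime;

	// 3. every pallet declares its index
	#[runtime::pallet_index(0)]
	pub type System = frame_system;

	// 6. a pallet instance, equivalent to `TemplateModule: pallet_template`
	#[runtime::pallet_index(1)]
	pub type TemplateModule = pallet_template;
}
```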
+
+migrations:
+  db: []
+
+  runtime: []
+
+crates:
+  - name: frame-support
+  - name: frame-support-procedural
+
+host_functions: []
diff --git a/prdoc/pr_1554.prdoc b/prdoc/pr_1554.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..bfce7c5edafd9df39ff9235f18536591b1d47158
--- /dev/null
+++ b/prdoc/pr_1554.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Runtime Upgrade ref docs and Single Block Migration example pallet
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      `frame_support::traits::GetStorageVersion::current_storage_version` has been renamed to `frame_support::traits::GetStorageVersion::in_code_storage_version`.
+      A simple find-replace is sufficient to handle this change.
+
+crates:
+  - name: "frame-support"
+
diff --git a/prdoc/pr_1781.prdoc b/prdoc/pr_1781.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e3560842d15ab249539ac2de172dc5914e92cc19
--- /dev/null
+++ b/prdoc/pr_1781.prdoc
@@ -0,0 +1,45 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Multi-Block-Migrations, `poll` hook and new System Callbacks"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The major things that this MR touches are:
+
+      **Multi-Block-Migrations**: `pallet-migrations` is introduced; it can be configured in the
+      `System` of a runtime to act as the multi-block migrator. The `migrations` pallet then in turn
+      receives the list of MBMs as a config parameter. The list of migrations can be an aggregated
+      tuple of `SteppedMigration` trait implementations.
+      It is paramount that the `migrations` pallet is configured in `System` once it is deployed. A
+      test is in place to double check this.
+
+      To integrate this into your runtime, it is only necessary to change the return type of
+      `initialize_block` to `RuntimeExecutiveMode`. For extended info please see
+      https://github.com/paritytech/polkadot-sdk/pull/1781.
+
+      **poll**: a new pallet hook named `poll` is added. This can be used for places where the code
+      that should be executed is not deadline critical. Runtime devs are advised to skim their usage
+      of `on_initialize` and `on_finalize` to see whether they can be replaced with `poll`. `poll` is
+      not guaranteed to be called each block. In fact it will not be called when MBMs are ongoing.
+
+      **System Callbacks**: The `system` pallet gets five new config items - all of which can be
+      safely set to `()` as default. They are:
+      - `SingleBlockMigrations`: replaces the `Executive` now for configuring migrations.
+      - `MultiBlockMigrator`: the `pallet-migrations` would be set here, if deployed.
+      - `PreInherents`: a hook that runs before any inherent.
+      - `PostInherents`: a hook to run between inherents and `poll`/MBM logic.
+      - `PostTransactions`: a hook to run after all transactions but before `on_idle`.
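A sketch of the resulting `frame_system::Config` surface; `Runtime` and the `Migrations` pallet name are assumptions, and `()` is the safe default for every new item:

```rust
impl frame_system::Config for Runtime {
	// ...all pre-existing associated types elided...

	type SingleBlockMigrations = ();
	// Set to the `pallet-migrations` instance if MBMs are deployed, else `()`:
	type MultiBlockMigrator = Migrations;
	type PreInherents = ();
	type PostInherents = ();
	type PostTransactions = ();
}
```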
+
+crates:
+  - name: frame-executive
+  - name: frame-system
+  - name: frame-support
+  - name: frame-support-procedural
+  - name: pallet-migrations
+  - name: sc-basic-authorship
+  - name: sc-block-builder
+  - name: sp-api
+  - name: sp-api-proc-macro
+  - name: sp-runtime
diff --git a/prdoc/pr_2393.prdoc b/prdoc/pr_2393.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..708d017fafd50d103c0470997c3a7571e0ff30c1
--- /dev/null
+++ b/prdoc/pr_2393.prdoc
@@ -0,0 +1,16 @@
+title: "[XCMP] Use the number of 'ready' pages in XCMP suspend logic"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Semantics of the suspension logic in the XCMP queue pallet change from using the number of
+      total pages to the number of 'ready' pages. The number of ready pages is now also exposed by
+      the `MessageQueue` pallet to downstream via the queue `footprint`.
+
+crates:
+  - name: cumulus-pallet-xcmp-queue
+    bump: patch
+  - name: pallet-message-queue
+    bump: patch
+  - name: frame-support
+    bump: major
diff --git a/prdoc/pr_3002.prdoc b/prdoc/pr_3002.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..511a07e39c47d5055bd161fda0e748f96e4a9997
--- /dev/null
+++ b/prdoc/pr_3002.prdoc
@@ -0,0 +1,29 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: PoV Reclaim Runtime Side
+author: skunert
+topic: runtime
+doc:
+  - audience: Runtime Dev
+    description: |
+      Adds a mechanism to reclaim proof size weight.
+      1. Introduces a new `SignedExtension` that reclaims the difference
+      between benchmarked proof size weight and actual consumed proof size weight.
+      2. Introduces a manual mechanism, `StorageWeightReclaimer`, to reclaim excess storage weight for situations
+      that require manual weight management. The most prominent case is the `on_idle` hook.
+      3. Adds the `storage_proof_size` host function to the PVF. Parachain nodes should add it to ensure compatibility.
+
+      To enable proof size reclaiming, add the `storage_proof_size` host function to the parachain node. Add the
+      `StorageWeightReclaim` `SignedExtension` to your runtime and enable proof recording during block import.
+
+
+crates:
+  - name: "cumulus-primitives-storage-weight-reclaim"
+host_functions:
+  - name: "storage_proof_size"
+    description: |
+      This host function is used to pass the current size of the storage proof to the runtime.
+      It was introduced before but becomes relevant now.
+      Note: This host function is intended to be used through `cumulus_primitives_storage_weight_reclaim::get_proof_size`.
+      Direct usage is not recommended.
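A sketch of the runtime-side integration described above: the reclaim extension is appended to the signed-extension tuple (the surrounding extensions and `Runtime` are illustrative):

```rust
pub type SignedExtra = (
	frame_system::CheckNonZeroSender<Runtime>,
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckGenesis<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
	// New: reclaims the difference between benchmarked and consumed proof size.
	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
);
```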
diff --git a/prdoc/pr_3187.prdoc b/prdoc/pr_3187.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..bda41142d86c8f9c1af8398e0cf21cdfeebebfa7
--- /dev/null
+++ b/prdoc/pr_3187.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Retrying an execution on failed runtime construction
+
+doc:
+  - audience: Node Dev
+    description: |
+      If a runtime construction error happens during an execution request, the artifact is re-prepared
+      and the execution request is retried at most once. See also the related issue.
+
+crates:
+  - name: polkadot-node-core-candidate-validation
+  - name: polkadot-node-core-pvf
+  - name: polkadot-node-core-pvf-execute-worker
+  - name: polkadot-node-core-pvf-common
diff --git a/prdoc/pr_3231.prdoc b/prdoc/pr_3231.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..26e96d3635b14634ca523797d741ddaf224aa9ae
--- /dev/null
+++ b/prdoc/pr_3231.prdoc
@@ -0,0 +1,11 @@
+title: Allow parachain which acquires multiple coretime cores to make progress
+
+doc:
+  - audience: Node Operator
+    description: |
+      Adds the needed changes so that parachains which acquire multiple coretime cores can still make progress.
+      Only one of the cores will be able to be occupied at a time.
+      Only works if the ElasticScalingMVP node feature is enabled in the runtime and the block author validator is
+      updated to include this change.
+
+crates: [ ]
diff --git a/prdoc/pr_3233.prdoc b/prdoc/pr_3233.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..ed4e8cce31f75b4bd98f91f7d15ec3fae0761354
--- /dev/null
+++ b/prdoc/pr_3233.prdoc
@@ -0,0 +1,12 @@
+title: "provisioner: allow multiple cores assigned to the same para"
+
+topic: Node
+
+doc:
+  - audience: Node Dev
+    description: |
+      Enable supplying multiple backable candidates to the paras_inherent pallet for the same paraid.
+
+crates:
+  - name: polkadot-node-core-prospective-parachains
+  - name: polkadot-node-core-provisioner
diff --git a/prdoc/pr_3324.prdoc b/prdoc/pr_3324.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..0425fbf317c8620125e9fe64f96a0eb99e4595f0
--- /dev/null
+++ b/prdoc/pr_3324.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Fix weight calculation and event emission in pallet-membership"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Bug fix for the membership pallet to use correct weights. Also, no event is emitted
+      anymore when `change_key` is called with identical accounts.
+
+crates:
+  - name: pallet-membership
diff --git a/prdoc/pr_3371.prdoc b/prdoc/pr_3371.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..605d540772f0b249962088463237dda2e6690d6c
--- /dev/null
+++ b/prdoc/pr_3371.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: removed `pallet::getter` from example pallets
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes all the `pallet::getter` usages from the template pallets found in the Substrate and Cumulus template nodes, and from the Substrate example pallets.
+      The purpose is to discourage developers from using this macro, which is currently being removed and will soon be deprecated.
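The replacement pattern, sketched against the template pallet's `Something: StorageValue<_, u32>` item (any storage item works the same way):

```rust
// Before: the getter generated by `#[pallet::getter(fn something)]`:
//     pallet_template::Pallet::<T>::something()
// After: read through the storage type directly:
fn read_something<T: pallet_template::Config>() -> Option<u32> {
	pallet_template::Something::<T>::get()
}
```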
+
+crates:
+  - name: pallet-template
+  - name: pallet-parachain-template
+  - name: pallet-example-basic
+  - name: pallet-example-kitchensink
+  - name: pallet-example-offchain-worker
+  - name: pallet-example-split
+
diff --git a/prdoc/pr_3377.prdoc b/prdoc/pr_3377.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..8e5b3935512b1b2dd12836e940a5d95040df8cf2
--- /dev/null
+++ b/prdoc/pr_3377.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Permissioned contract deployment
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR introduces two new config types that specify the origins allowed to
+      upload and instantiate contract code. However, this check is not enforced when
+      a contract instantiates another contract.
+
+crates:
+- name: pallet-contracts
diff --git a/prdoc/pr_3378.prdoc b/prdoc/pr_3378.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..2470fc158519e31e297a7cbefcae3bc54eb8f708
--- /dev/null
+++ b/prdoc/pr_3378.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Remove deprecated GenesisConfig
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Removes the deprecated type `GenesisConfig`; it was replaced by `RuntimeGenesisConfig`
+      when it was deprecated on May 24, 2023 [#14210](https://github.com/paritytech/substrate/pull/14210).
+
+crates:
+  - name: frame-support-procedural
\ No newline at end of file
diff --git a/prdoc/pr_3403.prdoc b/prdoc/pr_3403.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..086e769da3c39c4d3def42791c18a82948676386
--- /dev/null
+++ b/prdoc/pr_3403.prdoc
@@ -0,0 +1,22 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Add `claim_assets` extrinsic to `pallet-xcm`"
+
+doc:
+  - audience: Runtime User
+    description: |
+      There's a new extrinsic in `pallet-xcm` for claiming assets.
+      This means that if your assets ever get trapped while teleporting or doing reserve asset transfers,
+      you can easily claim them by calling this new extrinsic.
+  - audience: Runtime Dev
+    description: |
+      There's a new extrinsic in `pallet-xcm` that needs a new configuration item for its benchmarks.
+      It's a simple function in `pallet_xcm::benchmarking::Config`, `get_asset`, that returns a valid asset
+      handled by the AssetTransactor of the chain.
+      If you're already using `pallet-xcm-benchmarks`, then you already have this function there and can
+      just copy and paste it.
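A sketch of the new helper for a chain whose AssetTransactor handles the native token; `Runtime` and `ExistentialDeposit` are assumptions, and the signature is inferred from the description above:

```rust
use xcm::latest::prelude::*;

impl pallet_xcm::benchmarking::Config for Runtime {
	// ...other helper items elided...
	fn get_asset() -> Asset {
		// Any asset the chain's AssetTransactor can handle, e.g. the native token.
		Asset { id: AssetId(Location::here()), fun: Fungible(ExistentialDeposit::get()) }
	}
}
```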
+
+crates:
+  - name: pallet-xcm
+  - name: staging-xcm
diff --git a/prdoc/pr_3411.prdoc b/prdoc/pr_3411.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..d847d6756ac7f019cf12e4d9e7993ebeda1273ab
--- /dev/null
+++ b/prdoc/pr_3411.prdoc
@@ -0,0 +1,11 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: add Encointer as trusted teleporter for Westend
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      add Encointer as trusted teleporter for Westend with ParaId 1003
+
+crates: [ ]
diff --git a/prdoc/pr_3412.prdoc b/prdoc/pr_3412.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1ee6edfeb837f81646d32633ed009d2cded4dcfe
--- /dev/null
+++ b/prdoc/pr_3412.prdoc
@@ -0,0 +1,17 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "[FRAME] Add genesis test and remove some checks"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The construct_runtime macro now generates a test to assert that all `GenesisConfig`s of all
+      pallets can be built within the runtime. This ensures that the `BuildGenesisConfig` runtime
+      API works.
+      Further, some checks from a few pallets were removed to make this pass.
+
+crates:
+  - name: pallet-babe
+  - name: cumulus-pallet-aura-ext
+  - name: pallet-session
diff --git a/prdoc/pr_3447.prdoc b/prdoc/pr_3447.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1d8d4f409f77cf29247d35d196bccff390896881
--- /dev/null
+++ b/prdoc/pr_3447.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Use generic hash for runtime wasm in resolve_state_version_from_wasm
+
+doc:
+  - audience: Node Dev
+    description: |
+      Changes the runtime hash algorithm used in resolve_state_version_from_wasm from DefaultHasher to a caller-provided
+      one (usually HashingFor). Fixes a bug where the runtime wasm was being compiled again when it was not
+      needed, because the hash did not match.
+crates:
+  - name: sc-chain-spec
diff --git a/prdoc/pr_3453.prdoc b/prdoc/pr_3453.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1f01440f722fb9984231f690a20c517b9d4bf0a1
--- /dev/null
+++ b/prdoc/pr_3453.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "[pallet-nomination-pools]: `chill` is permissionless if depositor's stake is less than `min_nominator_bond`"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Nomination pools currently have an issue whereby member funds cannot be unbonded if the depositor bonded amount is less than `MinNominatorBond`.
+      This PR makes the `chill` function permissionless if this condition is met.
+      Consequently, the `nominate` function also checks for `depositor` to have at least `MinNominatorBond`, and returns a `MinimumBondNotMet` error if not.
+crates:
+  - name: pallet-nomination-pools
diff --git a/prdoc/pr_3454.prdoc b/prdoc/pr_3454.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..7f564258f23a57f3784c2ee055176153f006734f
--- /dev/null
+++ b/prdoc/pr_3454.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Introduce storage attr macro #[disable_try_decode_storage] and set it on System::Events and ParachainSystem::HostConfiguration"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Allows marking storage items with \#[disable_try_decode_storage], which disables that storage item from being decoded
+      during try_decode_entire_state calls.
+
+      Applied the attribute to System::Events to close https://github.com/paritytech/polkadot-sdk/issues/2560.
+      Applied the attribute to ParachainSystem::HostConfiguration to resolve periodic issues with it.
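Sketched usage of the new attribute inside a pallet; the item name and value type are illustrative, and the attribute (written bare in the PR title) is applied alongside `#[pallet::storage]`:

```rust
#[pallet::storage]
#[pallet::disable_try_decode_storage]
pub type HostConfiguration<T: Config> = StorageValue<_, ConfigData, ValueQuery>;
```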
+
+crates:
+  - name: frame-support-procedural
+  - name: frame-system
+  - name: cumulus-pallet-parachain-system
+
diff --git a/prdoc/pr_3456.prdoc b/prdoc/pr_3456.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..c7327e17e57d74614fd1b593cc222faf4a87192d
--- /dev/null
+++ b/prdoc/pr_3456.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from `pallet-collective`
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes `pallet::getter` usage from `pallet-collective`, and updates dependent code accordingly.
+      The syntax `StorageItem::<T>::get()` should be used instead.
+
+crates:
+  - name: pallet-collective
+  - name: kitchensink-runtime
+  - name: collectives-westend-runtime
diff --git a/prdoc/pr_3460.prdoc b/prdoc/pr_3460.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1f16fe0890836dd25bfcaa75d5fc647688230dd8
--- /dev/null
+++ b/prdoc/pr_3460.prdoc
@@ -0,0 +1,26 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Repot all templates
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR moves all templates into a single folder in the polkadot-sdk repo (`/templates`) and
+      unifies their crate names as well. Most notably, the crate name for what was formerly known
+      as `node-template` is now `solochain-template-node`. The other two crates in the template are
+      consequently called: `solochain-runtime-template` and `pallet-solochain-template`.
+      The other two template crate names follow a similar pattern, just replacing `solochain` with
+      `parachain` or `minimal`.
+
+      This PR is part of a bigger step toward automating the template repositories, see the
+      following: https://github.com/paritytech/polkadot-sdk/issues/3155
+
+# the following crates are removed and renamed, although none are released.
+crates:
+  - name: minimal-template-runtime # formerly called minimal-runtime
+  - name: minimal-template-node # formerly called minimal-node
+  - name: solochain-template-node # formerly called node-template
+  - name: solochain-template-runtime # formerly called node-template-runtime
+  - name: parachain-template-runtime # formerly called parachain-runtime
+  - name: parachain-template-node # formerly called parachain-node
diff --git a/prdoc/pr_3491.prdoc b/prdoc/pr_3491.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e36afb916a6226c3609b1e2eb8aacf6af8127d32
--- /dev/null
+++ b/prdoc/pr_3491.prdoc
@@ -0,0 +1,12 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Remove Deprecated OldWeight
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Removed the deprecated sp_weights::OldWeight type. Use [`weight_v2::Weight`] instead.
+
+crates:
+  - name: frame-support
diff --git a/prdoc/pr_3504.prdoc b/prdoc/pr_3504.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..90a8ddd38b8f7f8159199ac6423ffee3dc645291
--- /dev/null
+++ b/prdoc/pr_3504.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: add prometheus label "is_rate_limited" to rpc calls
+
+doc:
+  - audience: Node Operator
+    description: |
+      This PR adds a label "is_rate_limited" to the prometheus metrics "substrate_rpc_calls_time" and "substrate_rpc_calls_finished"
+      that can be used to distinguish rate-limited RPC calls from other RPC calls, because rate-limited RPC calls may take
+      tens of seconds.
+
+crates: [ ]
diff --git a/prdoc/pr_3505.prdoc b/prdoc/pr_3505.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..08ad909739c7d761371e0e1e40d575794fc4e217
--- /dev/null
+++ b/prdoc/pr_3505.prdoc
@@ -0,0 +1,36 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removes `as [disambiguation_path]` from the required syntax in `derive_impl`
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes the need to specify `as [disambiguation_path]` for cases where the trait
+      definition resides within the same scope as the default impl path.
+
+      For example, in the following macro invocation
+      ```rust
+      #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+      impl frame_system::Config for Runtime {
+        ...
+      }
+      ```
+      the trait `DefaultConfig` lies within the `frame_system` scope and `TestDefaultConfig` impls
+      the `DefaultConfig` trait.
+
+      Using this information, we can compute the disambiguation path internally, thus removing the
+      need for an explicit specification:
+      ```rust
+      #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+      impl frame_system::Config for Runtime {
+        ...
+      }
+      ```
+
+      In cases where the trait lies outside this scope, we would still need to specify it explicitly,
+      but this should take care of most (if not all) uses of `derive_impl` within FRAME's context.
+
+crates:
+  - name: frame-support-procedural
+  - name: pallet-default-config-example
diff --git a/prdoc/pr_3510.prdoc b/prdoc/pr_3510.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..6ee2f9a81f750ef0c64a2b8d8197e1e44d7e9f0c
--- /dev/null
+++ b/prdoc/pr_3510.prdoc
@@ -0,0 +1,13 @@
+title: "Fix multi-collator parachain transition to async backing"
+
+doc:
+  - audience: Node Operator
+    description: |
+      The dynamic Aura slot duration, introduced in PR#3211, didn't take the block import pipeline
+      into account. The result was that a parachain backed by multiple collators was not able to
+      keep producing blocks after its runtime was upgraded to support async backing, requiring all
+      the collator nodes to be restarted. This change fixes the issue, introducing the dynamic Aura
+      slot duration into the block import pipeline.
+
+crates:
+  - name: "polkadot-parachain-bin"
diff --git a/prdoc/pr_3513.prdoc b/prdoc/pr_3513.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..e1f2afe280a5bc2e8d341f9ab5ac73315f1676e8
--- /dev/null
+++ b/prdoc/pr_3513.prdoc
@@ -0,0 +1,18 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Fix call enum's metadata regression
+
+doc:
+  - audience: Runtime Dev
+  - audience: Runtime User
+    description: |
+      This PR fixes an issue where, in the metadata of all FRAME-based chains, the documentation for
+      all extrinsics/calls/transactions had been replaced by a single line saying "see Pallet::name".
+
+      Many wallets display the metadata of transactions to the user before signing, and this bug
+      might have affected this process.
+
+crates:
+  - name: frame
+  - name: frame-support
diff --git a/prdoc/pr_3523.prdoc b/prdoc/pr_3523.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..c31d9d096dc00797254f7946fd44984636cb1547
--- /dev/null
+++ b/prdoc/pr_3523.prdoc
@@ -0,0 +1,19 @@
+title: Fix crash of synced parachain node run with `--sync=warp`
+
+doc:
+  - audience: Node Operator
+    description: |
+      Fix crash of `SyncingEngine` when an already synced parachain node is run with `--sync=warp`
+      (issue https://github.com/paritytech/polkadot-sdk/issues/3496).
+      The issue manifests itself as errors in the logs:
+      ```
+      [Parachain] Cannot set warp sync target block: no warp sync strategy is active.
+      [Parachain] Failed to set warp sync target block header, terminating `SyncingEngine`.
+      ```
+      Followed by a stream of messages:
+      ```
+      [Parachain] Protocol command streams have been shut down
+      ```
+
+crates:
+  - name: sc-network-sync
diff --git a/prdoc/pr_3532.prdoc b/prdoc/pr_3532.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..d47c8d1d49156aca4ac1b7683ee7fbd9d8e9e690
--- /dev/null
+++ b/prdoc/pr_3532.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Remove deprecated `trait Store`
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The deprecated `trait Store` feature has been removed from the codebase. Please remove usages of the `generate_store`
+      macro from your pallets and access the storage through generics. For example,
+      `<Pallet<T> as Store>::StoredRange::mutate` will need to be updated to `StoredRange::<T>::mutate`.
+
+crates:
+  - name: frame-support
+  - name: frame
\ No newline at end of file
diff --git a/prdoc/pr_3540.prdoc b/prdoc/pr_3540.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..d0a91882b6bf4e357fde899c2d939ddb7de62f34
--- /dev/null
+++ b/prdoc/pr_3540.prdoc
@@ -0,0 +1,10 @@
+title: "[pallet-contracts] Only allow non-deterministic code to be uploaded with Determinism::Relaxed"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The `upload_code` extrinsic will now only allow non-deterministic code to be uploaded with the `Determinism::Relaxed` flag.
+      This prevents an attacker from uploading "deterministic" code with the `Determinism::Relaxed` flag, which would prevent that code from being instantiated for on-chain execution.
+
+crates:
+  - name: pallet-contracts
diff --git a/prdoc/pr_3574.prdoc b/prdoc/pr_3574.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..99868ea8a132a6440b9daff0d9f7deaa329ca4b3
--- /dev/null
+++ b/prdoc/pr_3574.prdoc
@@ -0,0 +1,23 @@
+title: Generate test functions for each benchmark with benchmarking v2
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR fixes an issue where using the `impl_benchmark_test_suite` macro
+      within modules that use the benchmarking v2 macros (`#[benchmarks]`
+      and `#[instance_benchmarks]`) always produced a single test called
+      `test_benchmarks` instead of a separate benchmark test for every
+      benchmark (noted with the `#[benchmark]` macro).
+
+      By using this macro from now on, new tests will be created named
+      `test_benchmark_{name}`, where `name` is the name of the benchmark
+      function. Those tests will be nested inside the module intended for
+      benchmark functions.
+
+      Also, when using `impl_benchmark_test_suite` inside the module, an
+      import of that macro is no longer necessary, so any explicit
+      import of it will be marked as unused, the same way it already works
+      for the v1 macros.
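A sketch of a benchmarking v2 module after this change; the macro now emits one `test_benchmark_do_something` test per `#[benchmark]` function (the mock names `new_test_ext`/`Test` are assumptions):

```rust
use frame_benchmarking::v2::*;

#[benchmarks]
mod benches {
	use super::*;

	#[benchmark]
	fn do_something() {
		#[block]
		{
			// code under measurement
		}
	}

	// Generates `test_benchmark_do_something` nested inside this module; no
	// separate import of the macro is needed anymore.
	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}
```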
+ +crates: + - name: pallet-contracts diff --git a/prdoc/pr_3636.prdoc b/prdoc/pr_3636.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..934158ac1022dc14309696bea43492db8f7a0e88 --- /dev/null +++ b/prdoc/pr_3636.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet_broker] Fix `Linear::adapt_price` behavior at zero" + +doc: + - audience: Runtime Dev + description: | + This fixes the behaviour of `Linear` which is the default implementation of the `AdaptPrice` + trait in the broker pallet. Previously if cores were offered but not sold in only one sale, + the price would be set to zero and due to the logic being purely multiplicative, the price + would stay at 0 indefinitely. + +crates: + - name: pallet-broker diff --git a/prdoc/pr_3643.prdoc b/prdoc/pr_3643.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..6e85af8d99a07f397ce03d4b7c936f7560f36c8f --- /dev/null +++ b/prdoc/pr_3643.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix weight refund for `pallet_collator_selection::set_candidacy_bond` + +doc: + - audience: Runtime Dev + description: | + This PR implements the weight refund of `pallet_collator_selection::set_candidacy_bond` to + account for no iterations over the candidate list when the candidacy bond is decreased. +crates: + - name: pallet-collator-selection diff --git a/prdoc/pr_3665.prdoc b/prdoc/pr_3665.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..67725d24d185502871baf41ba65b856449c4dfc3 --- /dev/null +++ b/prdoc/pr_3665.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Revert "FRAME Create TransactionExtension as a replacement for SignedExtension (#2280)" + +doc: + - audience: Runtime Dev + description: | + This PR reverts the PR which introduced `TransactionExtension` to replace `SignedExtension`. + +crates: [ ] diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json index 82215d51866b35895b5e840a8f3a900b161a9cf6..1bd0b3b93ee45ea75d9781ef1485898dd86c7ff3 100644 --- a/prdoc/schema_user.json +++ b/prdoc/schema_user.json @@ -3,9 +3,8 @@ "$id": "https://raw.githubusercontent.com/paritytech/prdoc/master/prdoc_schema_user.json", "version": { "major": 1, - "minor": 0, - "patch": 0, - "timestamp": 20230817152351 + "minor": 1, + "patch": 0 }, "title": "Polkadot SDK PRDoc Schema", "description": "JSON Schema definition for the Polkadot SDK PR documentation", @@ -125,10 +124,16 @@ "name": { "type": "string" }, + "bump": { + "$ref": "#/$defs/semver_bump" + }, "note": { "type": "string" } - } + }, + "required": [ + "name" + ] }, "migration_db": { "type": "object", @@ -165,6 +170,26 @@ "description" ] }, + "semver_bump": { + "description": "The type of bump to apply to the crate version according to Cargo SemVer: https://doc.rust-lang.org/cargo/reference/semver.html. Please check docs/RELEASE.md for more information.", + "oneOf": [ + { + "const": "major", + "title": "Major", + "description": "A bump to the leftmost non-zero digit of the version number." + }, + { + "const": "minor", + "title": "Minor", + "description": "A bump to the second leftmost non-zero digit of the version number." 
+ }, + { + "const": "patch", + "title": "Patch", + "description": "A bump to the third leftmost non-zero digit of the version number." + } + ] + }, "doc": { "type": "object", "description": "You have the option to provide different descriptions of your PR for different audiences.", diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml deleted file mode 100644 index 02b3b0a735bfeac104084bed774f529fb5bb0ddc..0000000000000000000000000000000000000000 --- a/substrate/bin/minimal/node/Cargo.toml +++ /dev/null @@ -1,60 +0,0 @@ -[package] -name = "minimal-node" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." -authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition = "2021" -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" -build = "build.rs" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "minimal-node" - -[dependencies] -clap = { version = "4.5.1", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } -futures-timer = "3.0.1" -jsonrpsee = { version = "0.22", features = ["server"] } -serde_json = "1.0.113" - -sc-cli = { path = "../../../client/cli" } -sc-executor = { path = "../../../client/executor" } -sc-network = { path = "../../../client/network" } -sc-service = { path = "../../../client/service" } -sc-telemetry = { path = "../../../client/telemetry" } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-consensus-manual-seal = { path = "../../../client/consensus/manual-seal" } -sc-rpc-api = { path = "../../../client/rpc-api" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -sc-offchain = { path = "../../../client/offchain" } -sc-client-api = { path = "../../../client/api" } - -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-io = { path = "../../../primitives/io" } -sp-runtime = { path = "../../../primitives/runtime" } - -substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } - -frame = { path = "../../../frame", features = ["experimental", "runtime"] } -runtime = { package = "minimal-runtime", path = "../runtime" } - -[build-dependencies] -substrate-build-script-utils = { path = "../../../utils/build-script-utils" } - -[features] -default = [] diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml deleted file mode 100644 index 296106544bbfdbbb1f7f76b86d5c8ddefb54f917..0000000000000000000000000000000000000000 --- a/substrate/bin/minimal/runtime/Cargo.toml +++ /dev/null @@ -1,50 +0,0 @@ -[package] -name = "minimal-runtime" -version = "0.1.0" -authors.workspace = true -description = "A minimal Substrate example runtime" -edition.workspace = true -repository.workspace = true -license.workspace = true -publish = false - -[lints] -workspace = true - -[dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false } -scale-info = { version = "2.6.0", default-features = false } - -# this is a frame-based runtime, thus importing
`frame` with runtime feature enabled. -frame = { path = "../../../frame", default-features = false, features = ["experimental", "runtime"] } -frame-support = { path = "../../../frame/support", default-features = false } - -# pallets that we want to use -pallet-balances = { path = "../../../frame/balances", default-features = false } -pallet-sudo = { path = "../../../frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } - -# genesis builder that allows us to interacto with runtime genesis config -sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false } - - -[build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } - -[features] -default = ["std"] -std = [ - "frame-support/std", - "frame/std", - "pallet-balances/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "parity-scale-codec/std", - "scale-info/std", - "sp-genesis-builder/std", - "substrate-wasm-builder", -] diff --git a/substrate/bin/node-template/.editorconfig b/substrate/bin/node-template/.editorconfig deleted file mode 100644 index 5adac74ca24b3422222b2cb886b2a50cd22b6d1b..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -root = true - -[*] -indent_style=space -indent_size=2 -tab_width=2 -end_of_line=lf -charset=utf-8 -trim_trailing_whitespace=true -insert_final_newline = true - -[*.{rs,toml}] -indent_style=tab -indent_size=tab -tab_width=4 -max_line_length=100 diff --git a/substrate/bin/node-template/env-setup/.envrc b/substrate/bin/node-template/env-setup/.envrc deleted file mode 100644 index 3550a30f2de389e537ee40ca5e64a77dc185c79b..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/env-setup/.envrc +++ /dev/null @@ -1 +0,0 @@ -use flake diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml deleted file mode 100644 index 7a6955d2cec3df0e7277e72829321381deeda9ec..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/node/Cargo.toml +++ /dev/null @@ -1,92 +0,0 @@ -[package] -name = "node-template" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." 
-authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition.workspace = true -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" -build = "build.rs" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "node-template" - -[dependencies] -clap = { version = "4.5.1", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } -serde_json = "1.0.113" - -sc-cli = { path = "../../../client/cli" } -sp-core = { path = "../../../primitives/core" } -sc-executor = { path = "../../../client/executor" } -sc-network = { path = "../../../client/network" } -sc-service = { path = "../../../client/service" } -sc-telemetry = { path = "../../../client/telemetry" } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sc-offchain = { path = "../../../client/offchain" } -sc-consensus-aura = { path = "../../../client/consensus/aura" } -sp-consensus-aura = { path = "../../../primitives/consensus/aura" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sc-client-api = { path = "../../../client/api" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keyring = { path = "../../../primitives/keyring" } -frame-system = { path = "../../../frame/system" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } - -# These dependencies are used for the node template's RPCs -jsonrpsee = { version = "0.22", features = ["server"] } -sp-api = { path = "../../../primitives/api" } -sc-rpc-api = { path = "../../../client/rpc-api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { path = "../../../frame/transaction-payment/rpc" } - -# These dependencies are used for runtime benchmarking -frame-benchmarking = { path = "../../../frame/benchmarking" } -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli" } - -# Local Dependencies -node-template-runtime = { path = "../runtime" } - -# CLI-specific dependencies -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } - -[build-dependencies] -substrate-build-script-utils = { path = "../../../utils/build-script-utils" } - -[features] -default = [] -# Dependencies that are only required if runtime benchmarking should be build. -runtime-benchmarks = [ - "frame-benchmarking-cli/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "node-template-runtime/runtime-benchmarks", - "sc-service/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -# Enable features that allow the runtime to be tried and debugged. Name might be subject to change -# in the near future. 
-try-runtime = [ - "frame-system/try-runtime", - "node-template-runtime/try-runtime", - "pallet-transaction-payment/try-runtime", - "sp-runtime/try-runtime", - "try-runtime-cli/try-runtime", -] diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml deleted file mode 100644 index 95d92e6eadf056e3ac2f6ca7bcbaf4bd1d37499b..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ /dev/null @@ -1,125 +0,0 @@ -[package] -name = "node-template-runtime" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." -authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition.workspace = true -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } - -pallet-aura = { path = "../../../frame/aura", default-features = false } -pallet-balances = { path = "../../../frame/balances", default-features = false } -frame-support = { path = "../../../frame/support", default-features = false } -pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } -pallet-sudo = { path = "../../../frame/sudo", default-features = false } -frame-system = { path = "../../../frame/system", default-features = false } -frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } -frame-executive = { path = "../../../frame/executive", default-features = false } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../primitives/consensus/aura", default-features = false, features = ["serde"] } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } -sp-inherents = { path = "../../../primitives/inherents", default-features = false } -sp-offchain = { path = "../../../primitives/offchain", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../../primitives/session", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-storage = { path = "../../../primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } -serde_json = { version = "1.0.113", default-features = false, features = ["alloc"] } -sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } - -# Used for the node template's RPCs -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features 
= false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } - -# Used for runtime benchmarking -frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } - -# Local Dependencies -pallet-template = { path = "../pallets/template", default-features = false } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "pallet-aura/std", - "pallet-balances/std", - "pallet-grandpa/std", - "pallet-sudo/std", - "pallet-template/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "scale-info/std", - "serde_json/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-consensus-grandpa/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "substrate-wasm-builder", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-grandpa/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks", - "pallet-template/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -try-runtime = [ - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-aura/try-runtime", - "pallet-balances/try-runtime", - "pallet-grandpa/try-runtime", - "pallet-sudo/try-runtime", - "pallet-template/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "sp-runtime/try-runtime", -] -experimental = ["pallet-aura/experimental"] diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index ada58c79c77c7968950acc1b1316a33ca193e72b..8e0f7fb93b3904ac20902662326a7cba067fbf21 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -24,8 +24,8 @@ kitchensink-runtime = { path = "../runtime" } sc-client-api = { path = "../../../client/api" } sp-runtime = { path = "../../../primitives/runtime" } sp-state-machine = { path = "../../../primitives/state-machine" } -serde = "1.0.196" -serde_json = "1.0.113" +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } derive_more = { version = "0.99.17", default-features = false, features = ["display"] } kvdb = "0.13.0" kvdb-rocksdb = "0.19.0" diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index aed2101a3f59244e15bfe114c73e19b3a33024b9..abe284c41da1f21c064cebcf05ee23c9f4f540f0 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -43,7 +43,7 @@ crate-type = ["cdylib", "rlib"] array-bytes = "6.1" clap = { version = "4.5.1", features = ["derive"], optional = true } 
codec = { package = "parity-scale-codec", version = "3.6.1" } -serde = { version = "1.0.196", features = ["derive"] } +serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } futures = "0.3.21" log = { workspace = true, default-features = true } @@ -116,7 +116,7 @@ sc-cli = { path = "../../../client/cli", optional = true } frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } [dev-dependencies] sc-keystore = { path = "../../../client/keystore" } @@ -159,7 +159,7 @@ sp-consensus-babe = { path = "../../../primitives/consensus/babe" } sp-externalities = { path = "../../../primitives/externalities" } sp-keyring = { path = "../../../primitives/keyring" } sp-runtime = { path = "../../../primitives/runtime" } -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } scale-info = { version = "2.10.0", features = ["derive", "serde"] } sp-trie = { path = "../../../primitives/trie" } sp-state-machine = { path = "../../../primitives/state-machine" } diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index d28cfbddfd222fa0400735e0a72090a2e814aa58..23a62cc0bd243ebd1c8ade712b3ded2361f9e4c0 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -28,7 +28,7 @@ use sc_consensus::{ use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, - PruningMode, WasmExecutionMethod, WasmtimeInstantiationStrategy, + PruningMode, RpcBatchRequestConfig, WasmExecutionMethod, WasmtimeInstantiationStrategy, }, BasePath, Configuration, Role, }; @@ -84,6 +84,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { rpc_max_subs_per_conn: Default::default(), rpc_port: 9944, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index 1e25b7ce6fd876deff024808cf4694a0f1b5d82f..de4eef1944d41bc6f37d06819ad70ca0a6dad109 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -26,7 +26,7 @@ use node_primitives::AccountId; use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, - PruningMode, TransactionPoolOptions, + PruningMode, RpcBatchRequestConfig, TransactionPoolOptions, }, BasePath, Configuration, Role, }; @@ -80,6 +80,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { rpc_max_subs_per_conn: Default::default(), rpc_port: 9944, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index f3c0435fd32dcaa71f9f94b78dcae01ff07c8b81..3345afae4fd2b181f147fd6fb6d5597d36db7885 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ 
b/substrate/bin/node/cli/src/cli.rs @@ -63,7 +63,7 @@ pub enum Subcommand { /// Try-runtime has migrated to a standalone CLI /// (). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after Janurary 2024. + /// deprecation notice. It will be removed entirely some time after January 2024. TryRuntime, /// Key management cli utilities diff --git a/substrate/bin/node/cli/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json index 1465a6497cadb3d79f75910dc2e084c168559c62..e21fbb47da8c4619e0923c85bf3470828cd80b23 100644 --- a/substrate/bin/node/cli/tests/res/default_genesis_config.json +++ b/substrate/bin/node/cli/tests/res/default_genesis_config.json @@ -2,7 +2,13 @@ "system": {}, "babe": { "authorities": [], - "epochConfig": null + "epochConfig": { + "allowed_slots": "PrimaryAndSecondaryVRFSlots", + "c": [ + 1, + 4 + ] + } }, "indices": { "indices": [] diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 6c9be0fe1acb72d3e0395567f35bf9a9597085e5..7db6e3efd3a0fccc7ddd624e6ecb029c82637c03 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.5.1", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } -thiserror = "1.0" +thiserror = { workspace = true } sc-cli = { path = "../../../client/cli" } sc-client-api = { path = "../../../client/api" } sc-service = { path = "../../../client/service", default-features = false } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 7bc608ba89e19b74a9b5f0cc214e8875e2a287f1..4d342ceb460a3f3a5c12b502915f2b679c2b469c 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -26,7 +26,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } static_assertions = "1.1.0" log = { workspace = true } -serde_json = { version = "1.0.113", default-features = false, features = ["alloc", "arbitrary_precision"] } +serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } # pallet-asset-conversion: turn on "num-traits" feature primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } @@ -58,7 +58,7 @@ sp-io = { path = "../../../primitives/io", default-features = false } frame-executive = { path = "../../../frame/executive", default-features = false } frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false } frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false } -frame-support = { path = "../../../frame/support", default-features = false, features = ["tuples-96"] } +frame-support = { path = "../../../frame/support", default-features = false, features = ["experimental", "tuples-96"] } frame-system = { path = "../../../frame/system", default-features = false } frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } frame-election-provider-support = { path = "../../../frame/election-provider-support", default-features = false } @@ -88,6 +88,7 @@ pallet-election-provider-support-benchmarking = { path = "../../../frame/electio pallet-elections-phragmen = { path = 
"../../../frame/elections-phragmen", default-features = false } pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false } pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false } +pallet-migrations = { path = "../../../frame/migrations", default-features = false } pallet-nis = { path = "../../../frame/nis", default-features = false } pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } pallet-im-online = { path = "../../../frame/im-online", default-features = false } @@ -198,6 +199,7 @@ std = [ "pallet-lottery/std", "pallet-membership/std", "pallet-message-queue/std", + "pallet-migrations/std", "pallet-mixnet/std", "pallet-mmr/std", "pallet-multisig/std", @@ -302,6 +304,7 @@ runtime-benchmarks = [ "pallet-lottery/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", "pallet-mixnet/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", @@ -381,6 +384,7 @@ try-runtime = [ "pallet-lottery/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", "pallet-mixnet/try-runtime", "pallet-mmr/try-runtime", "pallet-multisig/try-runtime", diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs index 7ff52a758b3dd756dc4536df5928d3b2bb68a148..34f043b33a4edfe2d8cdbe154896e937826110ca 100644 --- a/substrate/bin/node/runtime/src/impls.rs +++ b/substrate/bin/node/runtime/src/impls.rs @@ -30,8 +30,8 @@ use pallet_identity::legacy::IdentityField; use sp_std::prelude::*; use crate::{ - AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime, - RuntimeCall, + AccountId, AllianceCollective, AllianceMotion, Assets, Authorship, Balances, Hash, + NegativeImbalance, Runtime, RuntimeCall, }; pub struct Author; @@ -107,7 +107,7 @@ impl ProposalProvider for AllianceProposalProvider } fn proposal_of(proposal_hash: Hash) -> Option { - AllianceMotion::proposal_of(proposal_hash) + pallet_collective::ProposalOf::::get(proposal_hash) } } @@ -276,7 +276,7 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); fm = next; if fm == min_multiplier() { - break + break; } iterations += 1; } diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index f36897ebdde00dd2179638aad05132cef378a2b5..3195ac59aa78ae2ae26cf90e089464d1fa819747 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -28,7 +28,7 @@ use frame_election_provider_support::{ onchain, BalancingConfig, ElectionDataProvider, SequentialPhragmen, VoteWeight, }; use frame_support::{ - construct_runtime, derive_impl, + derive_impl, dispatch::DispatchClass, dynamic_params::{dynamic_pallet_params, dynamic_params}, genesis_builder_helper::{build_config, create_default_config}, @@ -310,6 +310,7 @@ impl frame_system::Config for Runtime { type SystemWeightInfo = frame_system::weights::SubstrateWeight; type SS58Prefix = ConstU16<42>; type MaxConsumers = ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; } impl pallet_insecure_randomness_collective_flip::Config for Runtime {} @@ -1376,6 +1377,8 @@ impl pallet_contracts::Config for Runtime { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = 
EnsureSigned; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type RuntimeHoldReason = RuntimeHoldReason; #[cfg(not(feature = "runtime-benchmarks"))] @@ -1386,6 +1389,7 @@ impl pallet_contracts::Config for Runtime { type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Environment = (); + type ApiVersion = (); type Xcm = (); } @@ -2034,6 +2038,25 @@ impl pallet_statement::Config for Runtime { type MaxAllowedBytes = MaxAllowedBytes; } +parameter_types! { + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = (); + // Benchmarks need mocked migrations to guarantee that they succeed. + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = pallet_migrations::weights::SubstrateWeight; +} + parameter_types! { pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); } @@ -2190,91 +2213,260 @@ impl pallet_parameters::Config for Runtime { type WeightInfo = (); } -construct_runtime!( - pub enum Runtime { - System: frame_system, - Utility: pallet_utility, - Babe: pallet_babe, - Timestamp: pallet_timestamp, - // Authorship must be before session in order to note author in the correct session and era - // for im-online and staking. - Authorship: pallet_authorship, - Indices: pallet_indices, - Balances: pallet_balances, - TransactionPayment: pallet_transaction_payment, - AssetTxPayment: pallet_asset_tx_payment, - AssetConversionTxPayment: pallet_asset_conversion_tx_payment, - ElectionProviderMultiPhase: pallet_election_provider_multi_phase, - Staking: pallet_staking, - Session: pallet_session, - Democracy: pallet_democracy, - Council: pallet_collective::, - TechnicalCommittee: pallet_collective::, - Elections: pallet_elections_phragmen, - TechnicalMembership: pallet_membership::, - Grandpa: pallet_grandpa, - Treasury: pallet_treasury, - AssetRate: pallet_asset_rate, - Contracts: pallet_contracts, - Sudo: pallet_sudo, - ImOnline: pallet_im_online, - AuthorityDiscovery: pallet_authority_discovery, - Offences: pallet_offences, - Historical: pallet_session_historical, - RandomnessCollectiveFlip: pallet_insecure_randomness_collective_flip, - Identity: pallet_identity, - Society: pallet_society, - Recovery: pallet_recovery, - Vesting: pallet_vesting, - Scheduler: pallet_scheduler, - Glutton: pallet_glutton, - Preimage: pallet_preimage, - Proxy: pallet_proxy, - Multisig: pallet_multisig, - Bounties: pallet_bounties, - Tips: pallet_tips, - Assets: pallet_assets::, - PoolAssets: pallet_assets::, - Beefy: pallet_beefy, - // MMR leaf construction must be after session in order to have a leaf's next_auth_set - // refer to block. See issue polkadot-fellows/runtimes#160 for details. 
- Mmr: pallet_mmr, - MmrLeaf: pallet_beefy_mmr, - Lottery: pallet_lottery, - Nis: pallet_nis, - Uniques: pallet_uniques, - Nfts: pallet_nfts, - NftFractionalization: pallet_nft_fractionalization, - Salary: pallet_salary, - CoreFellowship: pallet_core_fellowship, - TransactionStorage: pallet_transaction_storage, - VoterList: pallet_bags_list::, - StateTrieMigration: pallet_state_trie_migration, - ChildBounties: pallet_child_bounties, - Referenda: pallet_referenda, - Remark: pallet_remark, - RootTesting: pallet_root_testing, - ConvictionVoting: pallet_conviction_voting, - Whitelist: pallet_whitelist, - AllianceMotion: pallet_collective::, - Alliance: pallet_alliance, - NominationPools: pallet_nomination_pools, - RankedPolls: pallet_referenda::, - RankedCollective: pallet_ranked_collective, - AssetConversion: pallet_asset_conversion, - FastUnstake: pallet_fast_unstake, - MessageQueue: pallet_message_queue, - Pov: frame_benchmarking_pallet_pov, - TxPause: pallet_tx_pause, - SafeMode: pallet_safe_mode, - Statement: pallet_statement, - Broker: pallet_broker, - TasksExample: pallet_example_tasks, - Mixnet: pallet_mixnet, - Parameters: pallet_parameters, - SkipFeelessPayment: pallet_skip_feeless_payment, - } -); +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + + #[runtime::pallet_index(1)] + pub type Utility = pallet_utility; + + #[runtime::pallet_index(2)] + pub type Babe = pallet_babe; + + #[runtime::pallet_index(3)] + pub type Timestamp = pallet_timestamp; + + // Authorship must be before session in order to note author in the correct session and era + // for im-online and staking. 
+ #[runtime::pallet_index(4)] + pub type Authorship = pallet_authorship; + + #[runtime::pallet_index(5)] + pub type Indices = pallet_indices; + + #[runtime::pallet_index(6)] + pub type Balances = pallet_balances; + + #[runtime::pallet_index(7)] + pub type TransactionPayment = pallet_transaction_payment; + + #[runtime::pallet_index(8)] + pub type AssetTxPayment = pallet_asset_tx_payment; + + #[runtime::pallet_index(9)] + pub type AssetConversionTxPayment = pallet_asset_conversion_tx_payment; + + #[runtime::pallet_index(10)] + pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase; + + #[runtime::pallet_index(11)] + pub type Staking = pallet_staking; + + #[runtime::pallet_index(12)] + pub type Session = pallet_session; + + #[runtime::pallet_index(13)] + pub type Democracy = pallet_democracy; + + #[runtime::pallet_index(14)] + pub type Council = pallet_collective; + + #[runtime::pallet_index(15)] + pub type TechnicalCommittee = pallet_collective; + + #[runtime::pallet_index(16)] + pub type Elections = pallet_elections_phragmen; + + #[runtime::pallet_index(17)] + pub type TechnicalMembership = pallet_membership; + + #[runtime::pallet_index(18)] + pub type Grandpa = pallet_grandpa; + + #[runtime::pallet_index(19)] + pub type Treasury = pallet_treasury; + + #[runtime::pallet_index(20)] + pub type AssetRate = pallet_asset_rate; + + #[runtime::pallet_index(21)] + pub type Contracts = pallet_contracts; + + #[runtime::pallet_index(22)] + pub type Sudo = pallet_sudo; + + #[runtime::pallet_index(23)] + pub type ImOnline = pallet_im_online; + + #[runtime::pallet_index(24)] + pub type AuthorityDiscovery = pallet_authority_discovery; + + #[runtime::pallet_index(25)] + pub type Offences = pallet_offences; + + #[runtime::pallet_index(26)] + pub type Historical = pallet_session_historical; + + #[runtime::pallet_index(27)] + pub type RandomnessCollectiveFlip = pallet_insecure_randomness_collective_flip; + + #[runtime::pallet_index(28)] + pub type Identity = pallet_identity; + + #[runtime::pallet_index(29)] + pub type Society = pallet_society; + + #[runtime::pallet_index(30)] + pub type Recovery = pallet_recovery; + + #[runtime::pallet_index(31)] + pub type Vesting = pallet_vesting; + + #[runtime::pallet_index(32)] + pub type Scheduler = pallet_scheduler; + + #[runtime::pallet_index(33)] + pub type Glutton = pallet_glutton; + + #[runtime::pallet_index(34)] + pub type Preimage = pallet_preimage; + + #[runtime::pallet_index(35)] + pub type Proxy = pallet_proxy; + + #[runtime::pallet_index(36)] + pub type Multisig = pallet_multisig; + + #[runtime::pallet_index(37)] + pub type Bounties = pallet_bounties; + + #[runtime::pallet_index(38)] + pub type Tips = pallet_tips; + + #[runtime::pallet_index(39)] + pub type Assets = pallet_assets; + + #[runtime::pallet_index(40)] + pub type PoolAssets = pallet_assets; + + #[runtime::pallet_index(41)] + pub type Beefy = pallet_beefy; + + // MMR leaf construction must be after session in order to have a leaf's next_auth_set + // refer to block. See issue polkadot-fellows/runtimes#160 for details. 
+ #[runtime::pallet_index(42)] + pub type Mmr = pallet_mmr; + + #[runtime::pallet_index(43)] + pub type MmrLeaf = pallet_beefy_mmr; + + #[runtime::pallet_index(44)] + pub type Lottery = pallet_lottery; + + #[runtime::pallet_index(45)] + pub type Nis = pallet_nis; + + #[runtime::pallet_index(46)] + pub type Uniques = pallet_uniques; + + #[runtime::pallet_index(47)] + pub type Nfts = pallet_nfts; + + #[runtime::pallet_index(48)] + pub type NftFractionalization = pallet_nft_fractionalization; + + #[runtime::pallet_index(49)] + pub type Salary = pallet_salary; + + #[runtime::pallet_index(50)] + pub type CoreFellowship = pallet_core_fellowship; + + #[runtime::pallet_index(51)] + pub type TransactionStorage = pallet_transaction_storage; + + #[runtime::pallet_index(52)] + pub type VoterList = pallet_bags_list; + + #[runtime::pallet_index(53)] + pub type StateTrieMigration = pallet_state_trie_migration; + + #[runtime::pallet_index(54)] + pub type ChildBounties = pallet_child_bounties; + + #[runtime::pallet_index(55)] + pub type Referenda = pallet_referenda; + + #[runtime::pallet_index(56)] + pub type Remark = pallet_remark; + + #[runtime::pallet_index(57)] + pub type RootTesting = pallet_root_testing; + + #[runtime::pallet_index(58)] + pub type ConvictionVoting = pallet_conviction_voting; + + #[runtime::pallet_index(59)] + pub type Whitelist = pallet_whitelist; + + #[runtime::pallet_index(60)] + pub type AllianceMotion = pallet_collective; + + #[runtime::pallet_index(61)] + pub type Alliance = pallet_alliance; + + #[runtime::pallet_index(62)] + pub type NominationPools = pallet_nomination_pools; + + #[runtime::pallet_index(63)] + pub type RankedPolls = pallet_referenda; + + #[runtime::pallet_index(64)] + pub type RankedCollective = pallet_ranked_collective; + + #[runtime::pallet_index(65)] + pub type AssetConversion = pallet_asset_conversion; + + #[runtime::pallet_index(66)] + pub type FastUnstake = pallet_fast_unstake; + + #[runtime::pallet_index(67)] + pub type MessageQueue = pallet_message_queue; + + #[runtime::pallet_index(68)] + pub type Pov = frame_benchmarking_pallet_pov; + + #[runtime::pallet_index(69)] + pub type TxPause = pallet_tx_pause; + + #[runtime::pallet_index(70)] + pub type SafeMode = pallet_safe_mode; + + #[runtime::pallet_index(71)] + pub type Statement = pallet_statement; + + #[runtime::pallet_index(72)] + pub type MultiBlockMigrations = pallet_migrations; + + #[runtime::pallet_index(73)] + pub type Broker = pallet_broker; + + #[runtime::pallet_index(74)] + pub type TasksExample = pallet_example_tasks; + + #[runtime::pallet_index(75)] + pub type Mixnet = pallet_mixnet; + + #[runtime::pallet_index(76)] + pub type Parameters = pallet_parameters; + + #[runtime::pallet_index(77)] + pub type SkipFeelessPayment = pallet_skip_feeless_payment; +} /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; @@ -2399,6 +2591,7 @@ mod benches { [pallet_lottery, Lottery] [pallet_membership, TechnicalMembership] [pallet_message_queue, MessageQueue] + [pallet_migrations, MultiBlockMigrations] [pallet_mmr, Mmr] [pallet_multisig, Multisig] [pallet_nomination_pools, NominationPoolsBench::] @@ -2444,7 +2637,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block); } - fn initialize_block(header: &<Block as BlockT>::Header) { + fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs index 6ec21fbe09342d1f704a4ed1a6f79b6160c27a01..c79612d68444c8bd64ad18c3b0a74ceed176eff8 100644 --- a/substrate/bin/node/testing/src/genesis.rs +++ b/substrate/bin/node/testing/src/genesis.rs @@ -20,9 +20,8 @@ use crate::keyring::*; use kitchensink_runtime::{ - constants::currency::*, AccountId, AssetsConfig, BabeConfig, BalancesConfig, GluttonConfig, - GrandpaConfig, IndicesConfig, RuntimeGenesisConfig, SessionConfig, SocietyConfig, StakerStatus, - StakingConfig, BABE_GENESIS_EPOCH_CONFIG, + constants::currency::*, AccountId, AssetsConfig, BalancesConfig, IndicesConfig, + RuntimeGenesisConfig, SessionConfig, SocietyConfig, StakerStatus, StakingConfig, }; use sp_keyring::Ed25519Keyring; use sp_runtime::Perbill; @@ -47,7 +46,6 @@ pub fn config_endowed(extra_endowed: Vec<AccountId>) -> RuntimeGenesisConfig { endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS))); RuntimeGenesisConfig { - system: Default::default(), indices: IndicesConfig { indices: vec![] }, balances: BalancesConfig { balances: endowed }, session: SessionConfig { @@ -69,39 +67,8 @@ pub fn config_endowed(extra_endowed: Vec<AccountId>) -> RuntimeGenesisConfig { invulnerables: vec![alice(), bob(), charlie()], ..Default::default() }, - babe: BabeConfig { - authorities: vec![], - epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - grandpa: GrandpaConfig { authorities: vec![], _config: Default::default() }, - beefy: Default::default(), - im_online: Default::default(), - authority_discovery: Default::default(), - democracy: Default::default(), - council: Default::default(), - technical_committee: Default::default(), - technical_membership: Default::default(), - elections: Default::default(), - sudo: Default::default(), - treasury: Default::default(), society: SocietyConfig { pot: 0 }, - vesting: Default::default(), assets: AssetsConfig { assets: vec![(9, alice(), true, 1)], ..Default::default() }, - pool_assets: Default::default(), - transaction_storage: Default::default(), - transaction_payment: Default::default(), - alliance: Default::default(), - alliance_motion: Default::default(), - nomination_pools: Default::default(), - safe_mode: Default::default(), - tx_pause: Default::default(), - glutton: GluttonConfig { - compute: Default::default(), - storage: Default::default(), - trash_data_count: Default::default(), - ..Default::default() - }, - mixnet: Default::default(), + ..Default::default() } } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index 8b3180fbbcd1591e6b70cd0ea95ef6fafe50b64f..996372c8a0f8638ad4420dc8e21c8f1d418bb60e 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -26,5 +26,5 @@ crate-type = ["rlib"] clap = { version = "4.5.1", features = ["derive"] } log = { workspace = true, default-features = true } sc-chain-spec = { path = "../../../client/chain-spec" } -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } sp-tracing = { path = "../../../primitives/tracing" } diff --git a/substrate/client/allocator/Cargo.toml b/substrate/client/allocator/Cargo.toml index
662125814fbe11ed85e00788b66929a6e06ce793..2c268b548ea9c32fabdc82226c77e82a7ef59cea 100644 --- a/substrate/client/allocator/Cargo.toml +++ b/substrate/client/allocator/Cargo.toml @@ -18,6 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -thiserror = "1.0.48" +thiserror = { workspace = true } sp-core = { path = "../../primitives/core" } sp-wasm-interface = { path = "../../primitives/wasm-interface" } diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index 09f60a476702c842ff80a36437f7a9f91eb968a9..cd7b613e277f3952304d70c3bf63c36d09432bc2 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -41,6 +41,6 @@ sp-storage = { path = "../../primitives/storage" } sp-trie = { path = "../../primitives/trie" } [dev-dependencies] -thiserror = "1.0.48" +thiserror = { workspace = true } sp-test-primitives = { path = "../../primitives/test-primitives" } substrate-test-runtime = { path = "../../test-utils/runtime" } diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index 46232c74539c6c230652a753b5fcd4b6761600d8..2de09840e4dfdc15e1f1d1acaa5f3e438de0caca 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -278,7 +278,7 @@ impl fmt::Display for UsageInfo { pub struct UnpinHandleInner { /// Hash of the block pinned by this handle hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, } impl Debug for UnpinHandleInner { @@ -291,7 +291,7 @@ impl UnpinHandleInner { /// Create a new [`UnpinHandleInner`] pub fn new( hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> Self { Self { hash, unpin_worker_sender } } @@ -299,12 +299,25 @@ impl UnpinHandleInner { impl Drop for UnpinHandleInner { fn drop(&mut self) { - if let Err(err) = self.unpin_worker_sender.unbounded_send(self.hash) { + if let Err(err) = + self.unpin_worker_sender.unbounded_send(UnpinWorkerMessage::Unpin(self.hash)) + { log::debug!(target: "db", "Unable to unpin block with hash: {}, error: {:?}", self.hash, err); }; } } +/// Message that signals notification-based pinning actions to the pinning-worker. +/// +/// When the notification is dropped, an `Unpin` message should be sent to the worker. +#[derive(Debug)] +pub enum UnpinWorkerMessage { + /// Should be sent when a import or finality notification is created. + AnnouncePin(Block::Hash), + /// Should be sent when a import or finality notification is dropped. + Unpin(Block::Hash), +} + /// Keeps a specific block pinned while the handle is alive. /// Once the last handle instance for a given block is dropped, the /// block is unpinned in the [`Backend`](crate::backend::Backend::unpin_block). @@ -315,7 +328,7 @@ impl UnpinHandle { /// Create a new [`UnpinHandle`] pub fn new( hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> UnpinHandle { UnpinHandle(Arc::new(UnpinHandleInner::new(hash, unpin_worker_sender))) } @@ -353,7 +366,7 @@ impl BlockImportNotification { header: Block::Header, is_new_best: bool, tree_route: Option>>, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> Self { Self { hash, @@ -412,7 +425,7 @@ impl FinalityNotification { /// Create finality notification from finality summary. 
pub fn from_summary( mut summary: FinalizeSummary, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> FinalityNotification { let hash = summary.finalized.pop().unwrap_or_default(); FinalityNotification { @@ -436,7 +449,7 @@ impl BlockImportNotification { /// Create finality notification from finality summary. pub fn from_summary( summary: ImportSummary, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> BlockImportNotification { let hash = summary.hash; BlockImportNotification { diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index 78f7144f4368d7975898c24e30c7cf8e4e0fdcc0..cdd4052f0b0998f283ede885f7e7de7509ecd169 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -32,7 +32,7 @@ multihash = { version = "0.18.1", default-features = false, features = [ log = { workspace = true, default-features = true } prost = "0.12" rand = "0.8.5" -thiserror = "1.0" +thiserror = { workspace = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-client-api = { path = "../api" } sc-network = { path = "../network" } diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 6db25416dee78323a1531f5868be2601af3eaee0..9bccb96ff378f262e9b66b4268d2ed95befe2bdb 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -306,29 +306,30 @@ where fn addresses_to_publish(&self) -> impl Iterator { let peer_id: Multihash = self.network.local_peer_id().into(); let publish_non_global_ips = self.publish_non_global_ips; - self.network - .external_addresses() - .into_iter() - .filter(move |a| { - if publish_non_global_ips { - return true - } + let addresses = self.network.external_addresses().into_iter().filter(move |a| { + if publish_non_global_ips { + return true + } - a.iter().all(|p| match p { - // The `ip_network` library is used because its `is_global()` method is stable, - // while `is_global()` in the standard library currently isn't. - multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, - multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, - _ => true, - }) - }) - .map(move |a| { - if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { - a - } else { - a.with(multiaddr::Protocol::P2p(peer_id)) - } + a.iter().all(|p| match p { + // The `ip_network` library is used because its `is_global()` method is stable, + // while `is_global()` in the standard library currently isn't. + multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, + multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, + _ => true, }) + }); + + debug!(target: LOG_TARGET, "Authority DHT record peer_id='{:?}' addresses='{:?}'", peer_id, addresses.clone().collect::>()); + + // The address must include the peer id if not already set. + addresses.map(move |a| { + if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { + a + } else { + a.with(multiaddr::Protocol::P2p(peer_id)) + } + }) } /// Publish own public addresses. 
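Aside on the `addresses_to_publish` hunk above: the refactor names the filtered iterator so the DHT record addresses can be logged before the local peer id is appended. Below is a minimal, self-contained sketch of that appending rule, assuming the `multiaddr` crate as used in this tree (where `Protocol::P2p` wraps a `Multihash`); the `ensure_peer_id` helper and the example address are illustrative, not part of the PR:

```rust
use multiaddr::{multihash::Multihash, Multiaddr, Protocol};

// Append `/p2p/<peer-id>` to an external address unless one is already
// present, mirroring the closure at the end of `addresses_to_publish`.
fn ensure_peer_id(addr: Multiaddr, peer_id: Multihash) -> Multiaddr {
    if addr.iter().any(|p| matches!(p, Protocol::P2p(_))) {
        addr
    } else {
        addr.with(Protocol::P2p(peer_id))
    }
}

fn main() {
    // Identity-hashed placeholder; a real node derives this from its PeerId.
    let peer_id = Multihash::wrap(0x00, b"example-peer").expect("digest fits");
    let addr: Multiaddr = "/ip4/203.0.113.7/tcp/30333".parse().expect("valid multiaddr");
    // Prints the address with a trailing `/p2p/...` component appended.
    println!("{}", ensure_peer_id(addr, peer_id));
}
```

Addresses that already carry a `/p2p/...` component are published unchanged, so the operation is idempotent across repeated publications.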
diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index c07f3e639c3e15b572dbb470c1518b9ddfddae32..932287ac86577b0e1a9b2c057320aeda9d12319a 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -38,7 +38,7 @@ use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, - Digest, Percent, SaturatedConversion, + Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion, }; use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; @@ -87,6 +87,22 @@ pub struct ProposerFactory { _phantom: PhantomData, } +impl Clone for ProposerFactory { + fn clone(&self) -> Self { + Self { + spawn_handle: self.spawn_handle.clone(), + client: self.client.clone(), + transaction_pool: self.transaction_pool.clone(), + metrics: self.metrics.clone(), + default_block_size_limit: self.default_block_size_limit, + soft_deadline_percent: self.soft_deadline_percent, + telemetry: self.telemetry.clone(), + include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, + _phantom: self._phantom, + } + } +} + impl ProposerFactory { /// Create a new proposer factory. /// @@ -319,11 +335,12 @@ where self.apply_inherents(&mut block_builder, inherent_data)?; - // TODO call `after_inherents` and check if we should apply extrinsincs here - // - - let end_reason = - self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?; + let mode = block_builder.extrinsic_inclusion_mode(); + let end_reason = match mode { + ExtrinsicInclusionMode::AllExtrinsics => + self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?, + ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden, + }; let (block, storage_changes, proof) = block_builder.build()?.into_inner(); let block_took = block_timer.elapsed(); diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 258e39d962b2de2397d85a54223c155e821ddfa3..2f22cd42591fc60bf1665d00a1c5b2b548ad3aea 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -37,7 +37,7 @@ use sp_core::traits::CallContext; use sp_runtime::{ legacy, traits::{Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One}, - Digest, + Digest, ExtrinsicInclusionMode, }; use std::marker::PhantomData; @@ -198,10 +198,12 @@ pub struct BlockBuilder<'a, Block: BlockT, C: ProvideRuntimeApi + 'a> { extrinsics: Vec, api: ApiRef<'a, C::Api>, call_api_at: &'a C, + /// Version of the [`BlockBuilderApi`] runtime API. version: u32, parent_hash: Block::Hash, /// The estimated size of the block header. estimated_header_size: usize, + extrinsic_inclusion_mode: ExtrinsicInclusionMode, } impl<'a, Block, C> BlockBuilder<'a, Block, C> @@ -244,9 +246,19 @@ where api.set_call_context(CallContext::Onchain); - api.initialize_block(parent_hash, &header)?; + let core_version = api + .api_version::>(parent_hash)? + .ok_or_else(|| Error::VersionInvalid("Core".to_string()))?; - let version = api + let extrinsic_inclusion_mode = if core_version >= 5 { + api.initialize_block(parent_hash, &header)? + } else { + #[allow(deprecated)] + api.initialize_block_before_version_5(parent_hash, &header)?; + ExtrinsicInclusionMode::AllExtrinsics + }; + + let bb_version = api .api_version::>(parent_hash)? 
.ok_or_else(|| Error::VersionInvalid("BlockBuilderApi".to_string()))?; @@ -254,12 +266,18 @@ where parent_hash, extrinsics: Vec::new(), api, - version, + version: bb_version, estimated_header_size, call_api_at, + extrinsic_inclusion_mode, }) } + /// The extrinsic inclusion mode of the runtime for this block. + pub fn extrinsic_inclusion_mode(&self) -> ExtrinsicInclusionMode { + self.extrinsic_inclusion_mode + } + /// Push onto the block's list of extrinsics. /// /// This will ensure the extrinsic can be validly executed (by executing it). diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index 82a5f7f55b04699a929a5a8dfad9af70125c27b7..f2138c07d71ad4c524e12d33ffb73d453d707fbb 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -18,8 +18,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } memmap2 = "0.9.3" -serde = { version = "1.0.196", features = ["derive"] } -serde_json = "1.0.113" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sc-client-api = { path = "../api" } sc-chain-spec-derive = { path = "derive" } sc-executor = { path = "../executor" } diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index 4e95206df6b5ec3a84205cd445d46ae49a7d67db..521eee578ecae3b03cf86a3b4e3630bb7cd22f02 100644 --- a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -20,5 +20,5 @@ proc-macro = true [dependencies] proc-macro-crate = "3.0.0" proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = "2.0.49" +quote = { workspace = true } +syn = { workspace = true } diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index fe8fcfda216e1fa86215daf93add2aa6adc78bd9..78e81e10d2b613d83c98e8b689cba2fd1356287c 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -1237,15 +1237,7 @@ mod tests { "TestName", "test", ChainType::Local, - move || substrate_test_runtime::RuntimeGenesisConfig { - babe: substrate_test_runtime::BabeConfig { - epoch_config: Some( - substrate_test_runtime::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION, - ), - ..Default::default() - }, - ..Default::default() - }, + || Default::default(), Vec::new(), None, None, diff --git a/substrate/client/chain-spec/src/genesis_block.rs b/substrate/client/chain-spec/src/genesis_block.rs index 6aa156a620a7933034643f8a7bdbf8e0d617ad97..3c7b9f64dcd6bc0babff662747571f9d31853eec 100644 --- a/substrate/client/chain-spec/src/genesis_block.rs +++ b/substrate/client/chain-spec/src/genesis_block.rs @@ -18,23 +18,25 @@ //! Tool for creating the genesis block. -use std::{collections::hash_map::DefaultHasher, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; +use codec::Encode; use sc_client_api::{backend::Backend, BlockImportOperation}; use sc_executor::RuntimeVersionOf; use sp_core::storage::{well_known_keys, StateVersion, Storage}; use sp_runtime::{ - traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Zero}, + traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, Zero}, BuildStorage, }; /// Return the state version given the genesis storage and executor. 
-pub fn resolve_state_version_from_wasm( +pub fn resolve_state_version_from_wasm( storage: &Storage, executor: &E, ) -> sp_blockchain::Result where E: RuntimeVersionOf, + H: HashT, { if let Some(wasm) = storage.top.get(well_known_keys::CODE) { let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. @@ -43,12 +45,7 @@ where let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &code_fetcher, heap_pages: None, - hash: { - use std::hash::{Hash, Hasher}; - let mut state = DefaultHasher::new(); - wasm.hash(&mut state); - state.finish().to_le_bytes().to_vec() - }, + hash: ::hash(wasm).encode(), }; let runtime_version = RuntimeVersionOf::runtime_version(executor, &mut ext, &runtime_code) .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string()))?; @@ -129,7 +126,8 @@ impl, E: RuntimeVersionOf> BuildGenesisBlock sp_blockchain::Result<(Block, Self::BlockImportOperation)> { let Self { genesis_storage, commit_genesis_state, backend, executor, _phantom } = self; - let genesis_state_version = resolve_state_version_from_wasm(&genesis_storage, &executor)?; + let genesis_state_version = + resolve_state_version_from_wasm::<_, HashingFor>(&genesis_storage, &executor)?; let mut op = backend.begin_operation()?; let state_root = op.set_genesis_state(genesis_storage, commit_genesis_state, genesis_state_version)?; diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index 8766dd5c5ad28c9263f7538d255e03a898244026..c8b54f66be6f52ffb6900fafd2e8ec1652030976 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -81,7 +81,8 @@ where .0 } - /// Returns the default `GenesisConfig` provided by the `runtime`. + /// Returns a json representation of the default `RuntimeGenesisConfig` provided by the + /// `runtime`. /// /// Calls [`GenesisBuilder::create_default_config`](sp_genesis_builder::GenesisBuilder::create_default_config) in the `runtime`. pub fn get_default_config(&self) -> core::result::Result { @@ -94,7 +95,7 @@ where Ok(from_slice(&default_config[..]).expect("returned value is json. qed.")) } - /// Build the given `GenesisConfig` and returns the genesis state. + /// Builds `RuntimeGenesisConfig` from given json blob and returns the genesis state. /// /// Calls [`GenesisBuilder::build_config`](sp_genesis_builder::GenesisBuilder::build_config) /// provided by the `runtime`. @@ -111,25 +112,26 @@ where Ok(ext.into_storages()) } - /// Creates the genesis state by patching the default `GenesisConfig` and applying it. + /// Creates the genesis state by patching the default `RuntimeGenesisConfig`. /// - /// This function generates the `GenesisConfig` for the runtime by applying a provided JSON - /// patch. The patch modifies the default `GenesisConfig` allowing customization of the specific - /// keys. The resulting `GenesisConfig` is then deserialized from the patched JSON - /// representation and stored in the storage. + /// This function generates the `RuntimeGenesisConfig` for the runtime by applying a provided + /// JSON patch. The patch modifies the default `RuntimeGenesisConfig` allowing customization of + /// the specific keys. The resulting `RuntimeGenesisConfig` is then deserialized from the + /// patched JSON representation and stored in the storage. /// /// If the provided JSON patch is incorrect or the deserialization fails the error will be /// returned. 
 	///
-	/// The patching process modifies the default `GenesisConfig` according to the following rules:
+	/// The patching process modifies the default `RuntimeGenesisConfig` according to the following
+	/// rules:
 	/// 1. Existing keys in the default configuration will be overridden by the corresponding values
 	///    in the patch.
 	/// 2. If a key exists in the patch but not in the default configuration, it will be added to
-	///    the resulting `GenesisConfig`.
+	///    the resulting `RuntimeGenesisConfig`.
 	/// 3. Keys in the default configuration that have null values in the patch will be removed from
-	///    the resulting `GenesisConfig`. This is helpful for changing enum variant value.
+	///    the resulting `RuntimeGenesisConfig`. This is helpful for changing enum variant values.
 	///
-	/// Please note that the patch may contain full `GenesisConfig`.
+	/// Please note that the patch may contain a full `RuntimeGenesisConfig`.
 	pub fn get_storage_for_patch(&self, patch: Value) -> core::result::Result<Storage, String> {
 		let mut config = self.get_default_config()?;
 		crate::json_patch::merge(&mut config, patch);
@@ -149,7 +151,7 @@ mod tests {
 		<GenesisConfigBuilderRuntimeCaller>::new(substrate_test_runtime::wasm_binary_unwrap())
 			.get_default_config()
 			.unwrap();
-		let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#;
+		let expected = r#"{"babe": {"authorities": [], "epochConfig": {"allowed_slots": "PrimaryAndSecondaryVRFSlots", "c": [1, 4]}}, "balances": {"balances": []}, "substrateTest": {"authorities": []}, "system": {}}"#;
 		assert_eq!(from_str::<Value>(expected).unwrap(), config);
 	}
 
diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml
index 1b4d3733baa05f3b5deda185f2c63a8652b1e413..c70a0893c72505ce37d659225a93569ce98a025c 100644
--- a/substrate/client/cli/Cargo.toml
+++ b/substrate/client/cli/Cargo.toml
@@ -29,10 +29,11 @@ parity-scale-codec = "3.6.1"
 rand = "0.8.5"
 regex = "1.6.0"
 rpassword = "7.0.0"
-serde = "1.0.196"
-serde_json = "1.0.113"
-thiserror = "1.0.48"
-bip39 = "2.0.0"
+serde = { workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
+thiserror = { workspace = true }
+# personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64
+bip39 = { package = "parity-bip39", version = "2.0.1", features = ["rand"] }
 tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "signal"] }
 sc-client-api = { path = "../api" }
 sc-client-db = { path = "../db", default-features = false }
diff --git a/substrate/client/cli/src/commands/generate.rs b/substrate/client/cli/src/commands/generate.rs
index c465bcc85a47be16a5217677311499ce42573895..94769279e21940993785b2a4eeb6fed5d08ddeb6 100644
--- a/substrate/client/cli/src/commands/generate.rs
+++ b/substrate/client/cli/src/commands/generate.rs
@@ -64,7 +64,7 @@ impl GenerateCmd {
 		let password = self.keystore_params.read_password()?;
 		let output = self.output_scheme.output_type;
 
-		let phrase = mnemonic.word_iter().join(" ");
+		let phrase = mnemonic.words().join(" ");
 
 		with_crypto_scheme!(
 			self.crypto_scheme.scheme,
diff --git a/substrate/client/cli/src/commands/run_cmd.rs b/substrate/client/cli/src/commands/run_cmd.rs
index c50198cf3cb8c086813551bae884f98e69d75732..221c32affd5a91d680f23ed11eb5f7a9a4051da2 100644
--- a/substrate/client/cli/src/commands/run_cmd.rs
+++ b/substrate/client/cli/src/commands/run_cmd.rs
@@ -30,7 +30,7 @@ use crate::{
 use clap::Parser;
 use regex::Regex;
 use sc_service::{
-	config::{BasePath, PrometheusConfig, TransactionPoolOptions},
+	config::{BasePath, PrometheusConfig, RpcBatchRequestConfig, TransactionPoolOptions},
 	ChainSpec, Role,
 };
 use sc_telemetry::TelemetryEndpoints;
@@ -125,6 +125,14 @@ pub struct RunCmd {
 	#[arg(long, default_value_t = RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN)]
 	pub rpc_message_buffer_capacity_per_connection: u32,
 
+	/// Disable RPC batch requests
+	#[arg(long, alias = "rpc_no_batch_requests", conflicts_with_all = &["rpc_max_batch_request_len"])]
+	pub rpc_disable_batch_requests: bool,
+
+	/// Limit the max length per RPC batch request
+	#[arg(long, conflicts_with_all = &["rpc_disable_batch_requests"], value_name = "LEN")]
+	pub rpc_max_batch_request_len: Option<u32>,
+
 	/// Specify browser *origins* allowed to access the HTTP & WS RPC servers.
 	///
 	/// A comma-separated list of origins (protocol://domain or special `null`
@@ -411,6 +419,22 @@ impl CliConfiguration for RunCmd {
 		Ok(self.rpc_max_subscriptions_per_connection)
 	}
 
+	fn rpc_buffer_capacity_per_connection(&self) -> Result<u32> {
+		Ok(self.rpc_message_buffer_capacity_per_connection)
+	}
+
+	fn rpc_batch_config(&self) -> Result<RpcBatchRequestConfig> {
+		let cfg = if self.rpc_disable_batch_requests {
+			RpcBatchRequestConfig::Disabled
+		} else if let Some(l) = self.rpc_max_batch_request_len {
+			RpcBatchRequestConfig::Limit(l)
+		} else {
+			RpcBatchRequestConfig::Unlimited
+		};
+
+		Ok(cfg)
+	}
+
 	fn rpc_rate_limit(&self) -> Result<Option<NonZeroU32>> {
 		Ok(self.rpc_rate_limit)
 	}
diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs
index 78015cf8373d1e44f020304d8e258cad742f3adc..5def9ce9b72620eb7942ac6ee68b16493f6b8053 100644
--- a/substrate/client/cli/src/config.rs
+++ b/substrate/client/cli/src/config.rs
@@ -28,7 +28,8 @@ use sc_service::{
 	config::{
 		BasePath, Configuration, DatabaseSource, KeystoreConfig, NetworkConfiguration,
 		NodeKeyConfig, OffchainWorkerConfig, OutputFormat, PrometheusConfig, PruningMode, Role,
-		RpcMethods, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod,
+		RpcBatchRequestConfig, RpcMethods, TelemetryEndpoints, TransactionPoolOptions,
+		WasmExecutionMethod,
 	},
 	BlocksPruning, ChainSpec, TracingReceiver,
 };
@@ -338,7 +339,12 @@ pub trait CliConfiguration: Sized {
 		Ok(RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN)
 	}
 
-	/// Rate limit calls per minute.
+	/// RPC server batch request configuration.
+	fn rpc_batch_config(&self) -> Result<RpcBatchRequestConfig> {
+		Ok(RpcBatchRequestConfig::Unlimited)
+	}
+
+	/// RPC rate limit configuration.
 	fn rpc_rate_limit(&self) -> Result<Option<NonZeroU32>> {
 		Ok(None)
 	}
@@ -515,6 +521,7 @@ pub trait CliConfiguration: Sized {
 			rpc_max_subs_per_conn: self.rpc_max_subscriptions_per_connection()?,
 			rpc_port: DCV::rpc_listen_port(),
 			rpc_message_buffer_capacity: self.rpc_buffer_capacity_per_connection()?,
+			rpc_batch_config: self.rpc_batch_config()?,
 			rpc_rate_limit: self.rpc_rate_limit()?,
 			prometheus_config: self
 				.prometheus_config(DCV::prometheus_listen_port(), &chain_spec)?,
diff --git a/substrate/client/cli/src/runner.rs b/substrate/client/cli/src/runner.rs
index b4937db71e690bd02c89a4ee1dd4f0b389a7e366..4201a0f4062fb0063621d85dd02feb0deade24bc 100644
--- a/substrate/client/cli/src/runner.rs
+++ b/substrate/client/cli/src/runner.rs
@@ -271,6 +271,7 @@ mod tests {
 			rpc_max_subs_per_conn: Default::default(),
 			rpc_message_buffer_capacity: Default::default(),
 			rpc_port: 9944,
+			rpc_batch_config: sc_service::config::RpcBatchRequestConfig::Unlimited,
 			rpc_rate_limit: None,
 			prometheus_config: None,
 			telemetry_endpoints: None,
diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml
index d1076da35f9d81c80ab868dcd1b88aebc0625a6a..213f75974da0c99cc5c39248a8a789c4989d8047 100644
--- a/substrate/client/consensus/aura/Cargo.toml
+++ b/substrate/client/consensus/aura/Cargo.toml
@@ -20,7 +20,7 @@ async-trait = "0.1.74"
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 futures = "0.3.21"
 log = { workspace = true, default-features = true }
-thiserror = "1.0"
+thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-block-builder = { path = "../../block-builder" }
 sc-client-api = { path = "../../api" }
diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml
index 9bfed7e541b28bf547c14270e401110ce7fcff4f..c98fb7112b7c25bd39d980cab20c188f30b42eff 100644
--- a/substrate/client/consensus/babe/Cargo.toml
+++ b/substrate/client/consensus/babe/Cargo.toml
@@ -25,7 +25,7 @@ num-bigint = "0.4.3"
 num-rational = "0.4.1"
 num-traits = "0.2.17"
 parking_lot = "0.12.1"
-thiserror = "1.0"
+thiserror = { workspace = true }
 fork-tree = { path = "../../../utils/fork-tree" }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-client-api = { path = "../../api" }
diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml
index 342c9259cc668e3520efcb9e555aa13828bf23fe..043b566673e7bd5aa581e3043dfa1280aa04d53f 100644
--- a/substrate/client/consensus/babe/rpc/Cargo.toml
+++ b/substrate/client/consensus/babe/rpc/Cargo.toml
@@ -18,8 +18,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
 futures = "0.3.21"
-serde = { version = "1.0.196", features = ["derive"] }
-thiserror = "1.0"
+serde = { features = ["derive"], workspace = true, default-features = true }
+thiserror = { workspace = true }
 sc-consensus-babe = { path = ".."
} sc-consensus-epochs = { path = "../../epochs" } sc-rpc-api = { path = "../../../rpc-api" } @@ -33,7 +33,7 @@ sp-keystore = { path = "../../../../primitives/keystore" } sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } tokio = "1.22.0" sc-consensus = { path = "../../common" } sc-keystore = { path = "../../../keystore" } diff --git a/substrate/client/consensus/babe/rpc/src/lib.rs b/substrate/client/consensus/babe/rpc/src/lib.rs index 8b183e7dfdcd3888ef618e1506c5d57be89403f5..a3e811baecffd6de1b9f94952be40d00d8b61de3 100644 --- a/substrate/client/consensus/babe/rpc/src/lib.rs +++ b/substrate/client/consensus/babe/rpc/src/lib.rs @@ -258,7 +258,7 @@ mod tests { let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params": [],"id":1}"#; let (response, _) = api.raw_json_request(request, 1).await.unwrap(); - let expected = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4],"secondary_vrf":[]}},"id":1}"#; + let expected = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[],"secondary_vrf":[1,2,4]}},"id":1}"#; assert_eq!(response, expected); } diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index d6b8a57b98845355667a0d428801c5f0888846f1..8552a49002239aaeaf232c0aebf4180cec207337 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -20,7 +20,7 @@ fnv = "1.0.6" futures = "0.3" log = { workspace = true, default-features = true } parking_lot = "0.12.1" -thiserror = "1.0" +thiserror = { workspace = true } wasm-timer = "0.2.5" prometheus = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } @@ -44,7 +44,7 @@ tokio = "1.22.0" [dev-dependencies] -serde = "1.0.196" +serde = { workspace = true, default-features = true } tempfile = "3.1.0" sc-block-builder = { path = "../../block-builder" } sc-network-test = { path = "../../network/test" } diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index e83c95ab106b4d098cebc56aafc1090bd50e6994..bb2ae4a08966707fd93afd48b625ccc4f760762b 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -17,8 +17,8 @@ futures = "0.3.21" jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" -serde = { version = "1.0.196", features = ["derive"] } -thiserror = "1.0" +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } sc-consensus-beefy = { path = ".." 
}
 sp-consensus-beefy = { path = "../../../../primitives/consensus/beefy" }
 sc-rpc = { path = "../../../rpc" }
@@ -26,7 +26,7 @@ sp-core = { path = "../../../../primitives/core" }
 sp-runtime = { path = "../../../../primitives/runtime" }
 
 [dev-dependencies]
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 sc-rpc = { path = "../../../rpc", features = ["test-helpers"] }
 substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" }
 tokio = { version = "1.22.0", features = ["macros"] }
diff --git a/substrate/client/consensus/beefy/src/aux_schema.rs b/substrate/client/consensus/beefy/src/aux_schema.rs
index 944a00f8372fa8b0643dd4ddff8f8ce684e9f325..534f668ae69c2996064bef086e2958b23f48caf0 100644
--- a/substrate/client/consensus/beefy/src/aux_schema.rs
+++ b/substrate/client/consensus/beefy/src/aux_schema.rs
@@ -20,7 +20,7 @@
 use crate::{error::Error, worker::PersistedState, LOG_TARGET};
 use codec::{Decode, Encode};
-use log::{info, trace};
+use log::{debug, trace};
 use sc_client_api::{backend::AuxStore, Backend};
 use sp_runtime::traits::Block as BlockT;
@@ -30,7 +30,7 @@ const WORKER_STATE_KEY: &[u8] = b"beefy_voter_state";
 const CURRENT_VERSION: u32 = 4;
 
 pub(crate) fn write_current_version<BE: AuxStore>(backend: &BE) -> Result<(), Error> {
-	info!(target: LOG_TARGET, "🥩 write aux schema version {:?}", CURRENT_VERSION);
+	debug!(target: LOG_TARGET, "🥩 write aux schema version {:?}", CURRENT_VERSION);
 	AuxStore::insert_aux(backend, &[(VERSION_KEY, CURRENT_VERSION.encode().as_slice())], &[])
 		.map_err(|e| Error::Backend(e.to_string()))
 }
diff --git a/substrate/client/consensus/beefy/src/communication/gossip.rs b/substrate/client/consensus/beefy/src/communication/gossip.rs
index 8a0b0a74308f1a6d6bf3bae327b87fac6fb6cdde..eb43c9173d751bf75bc4973aca248d80fb68ca48 100644
--- a/substrate/client/consensus/beefy/src/communication/gossip.rs
+++ b/substrate/client/consensus/beefy/src/communication/gossip.rs
@@ -56,6 +56,8 @@ pub(super) enum Action<H> {
 	Keep(H, ReputationChange),
 	// discard, applying cost/benefit to originator.
 	Discard(ReputationChange),
+	// ignore, no cost/benefit applied to originator.
+	DiscardNoReport,
 }
 
 /// An outcome of examining a message.
@@ -68,7 +70,7 @@ enum Consider {
 	/// Message is from the future. Reject.
 	RejectFuture,
 	/// Message cannot be evaluated. Reject.
-	RejectOutOfScope,
+	CannotEvaluate,
 }
 
 /// BEEFY gossip message type that gets encoded and sent on the network.
@@ -168,18 +170,14 @@ impl<B: Block> Filter<B> {
 			.as_ref()
 			.map(|f|
 				// only from current set and only [filter.start, filter.end]
-				if set_id < f.validator_set.id() {
+				if set_id < f.validator_set.id() || round < f.start {
 					Consider::RejectPast
-				} else if set_id > f.validator_set.id() {
-					Consider::RejectFuture
-				} else if round < f.start {
-					Consider::RejectPast
-				} else if round > f.end {
+				} else if set_id > f.validator_set.id() || round > f.end {
 					Consider::RejectFuture
 				} else {
 					Consider::Accept
 				})
-			.unwrap_or(Consider::RejectOutOfScope)
+			.unwrap_or(Consider::CannotEvaluate)
 	}
 
 	/// Return true if `round` is >= than `max(session_start, best_beefy)`,
@@ -199,7 +197,7 @@ impl<B: Block> Filter<B> {
 					Consider::Accept
 				}
 			)
-			.unwrap_or(Consider::RejectOutOfScope)
+			.unwrap_or(Consider::CannotEvaluate)
 	}
 
 	/// Add new _known_ `round` to the set of seen valid justifications.
@@ -244,7 +242,7 @@ where
 	pub(crate) fn new(
 		known_peers: Arc<Mutex<KnownPeers<B>>>,
 	) -> (GossipValidator<B>, TracingUnboundedReceiver<PeerReport>) {
-		let (tx, rx) = tracing_unbounded("mpsc_beefy_gossip_validator", 10_000);
+		let (tx, rx) = tracing_unbounded("mpsc_beefy_gossip_validator", 100_000);
 		let val = GossipValidator {
 			votes_topic: votes_topic::<B>(),
 			justifs_topic: proofs_topic::<B>(),
@@ -289,7 +287,9 @@ where
 		match filter.consider_vote(round, set_id) {
 			Consider::RejectPast => return Action::Discard(cost::OUTDATED_MESSAGE),
 			Consider::RejectFuture => return Action::Discard(cost::FUTURE_MESSAGE),
-			Consider::RejectOutOfScope => return Action::Discard(cost::OUT_OF_SCOPE_MESSAGE),
+			// When we can't evaluate, it's our fault (e.g. filter not initialized yet), we
+			// discard the vote without punishing or rewarding the sending peer.
+			Consider::CannotEvaluate => return Action::DiscardNoReport,
 			Consider::Accept => {},
 		}
 
@@ -330,7 +330,9 @@ where
 		match guard.consider_finality_proof(round, set_id) {
 			Consider::RejectPast => return Action::Discard(cost::OUTDATED_MESSAGE),
 			Consider::RejectFuture => return Action::Discard(cost::FUTURE_MESSAGE),
-			Consider::RejectOutOfScope => return Action::Discard(cost::OUT_OF_SCOPE_MESSAGE),
+			// When we can't evaluate, it's our fault (e.g. filter not initialized yet), we
+			// discard the proof without punishing or rewarding the sending peer.
+			Consider::CannotEvaluate => return Action::DiscardNoReport,
 			Consider::Accept => {},
 		}
 
@@ -357,7 +359,9 @@ where
 					Action::Keep(self.justifs_topic, benefit::VALIDATED_PROOF)
 				}
 			})
-			.unwrap_or(Action::Discard(cost::OUT_OF_SCOPE_MESSAGE))
+			// When we can't evaluate, it's our fault (e.g. filter not initialized yet), we
+			// discard the proof without punishing or rewarding the sending peer.
+			.unwrap_or(Action::DiscardNoReport)
 		};
 		if matches!(action, Action::Keep(_, _)) {
 			self.gossip_filter.write().mark_round_as_proven(round);
@@ -404,6 +408,7 @@ where
 				self.report(*sender, cb);
 				ValidationResult::Discard
 			},
+			Action::DiscardNoReport => ValidationResult::Discard,
 		}
 	}
 
@@ -579,8 +584,8 @@ pub(crate) mod tests {
 		// filter not initialized
 		let res = gv.validate(&mut context, &sender, &encoded);
 		assert!(matches!(res, ValidationResult::Discard));
-		expected_report.cost_benefit = cost::OUT_OF_SCOPE_MESSAGE;
-		assert_eq!(report_stream.try_recv().unwrap(), expected_report);
+		// nothing reported
+		assert!(report_stream.try_recv().is_err());
 
 		gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: &validator_set });
 		// nothing in cache first time
diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs
index 3827559057dde856d201ebb9c9ff71a1d7d11f47..6fda63688e6952839c18e1bbc9ff50ff0c5f7c21 100644
--- a/substrate/client/consensus/beefy/src/communication/mod.rs
+++ b/substrate/client/consensus/beefy/src/communication/mod.rs
@@ -90,8 +90,6 @@ mod cost {
 	pub(super) const BAD_SIGNATURE: Rep = Rep::new(-100, "BEEFY: Bad signature");
 	// Message received with vote from voter not in validator set.
 	pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "BEEFY: Unknown voter");
-	// A message received that cannot be evaluated relative to our current state.
-	pub(super) const OUT_OF_SCOPE_MESSAGE: Rep = Rep::new(-500, "BEEFY: Out-of-scope message");
 	// Message containing invalid proof.
 	pub(super) const INVALID_PROOF: Rep = Rep::new(-5000, "BEEFY: Invalid commit");
 	// Reputation cost per signature checked for invalid proof.
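The behavioral core of the gossip hunks above is easy to lose in the noise: a message the node cannot judge (because its own filter is not initialized yet) used to cost the sender 500 reputation points, and now is dropped silently. Below is a minimal, self-contained sketch of the new triage flow; the enum shapes and cost values are simplified stand-ins for the crate's `Consider`, `Action` and `ReputationChange` types, not the actual definitions.

```rust
// Simplified model of BEEFY gossip triage after this change: a message the
// node cannot evaluate is discarded without penalizing the sender.

#[derive(Debug, PartialEq)]
enum Consider {
    Accept,
    RejectPast,
    RejectFuture,
    // The local filter is not initialized yet; the fault is ours, not the peer's.
    CannotEvaluate,
}

#[derive(Debug, PartialEq)]
enum Action {
    Keep,
    // Discard and charge the sender the given reputation cost.
    Discard(i32),
    // Discard without any reputation change for the sender.
    DiscardNoReport,
}

const OUTDATED_MESSAGE: i32 = -200; // hypothetical cost values
const FUTURE_MESSAGE: i32 = -100;

fn triage(verdict: Consider) -> Action {
    match verdict {
        Consider::Accept => Action::Keep,
        Consider::RejectPast => Action::Discard(OUTDATED_MESSAGE),
        Consider::RejectFuture => Action::Discard(FUTURE_MESSAGE),
        // Previously this arm (then called RejectOutOfScope) charged the peer
        // OUT_OF_SCOPE_MESSAGE (-500); now it is reputation-neutral.
        Consider::CannotEvaluate => Action::DiscardNoReport,
    }
}

fn main() {
    assert_eq!(triage(Consider::CannotEvaluate), Action::DiscardNoReport);
    assert_eq!(triage(Consider::RejectPast), Action::Discard(OUTDATED_MESSAGE));
}
```

The updated unit test in the hunk above, which now expects no report on the reputation stream instead of an `OUT_OF_SCOPE_MESSAGE` cost, pins down exactly this behavior.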
diff --git a/substrate/client/consensus/beefy/src/import.rs b/substrate/client/consensus/beefy/src/import.rs
index fc19ecc301422d74d5da5a289ed641810b1022f1..ed8ed68c4e8d0d378728ba87d1ffe726f6c4c11a 100644
--- a/substrate/client/consensus/beefy/src/import.rs
+++ b/substrate/client/consensus/beefy/src/import.rs
@@ -159,7 +159,7 @@ where
 		// The proof is valid and the block is imported and final, we can import.
 		debug!(
 			target: LOG_TARGET,
-			"🥩 import justif {:?} for block number {:?}.", proof, number
+			"🥩 import justif {} for block number {:?}.", proof, number
 		);
 		// Send the justification to the BEEFY voter for processing.
 		self.justification_sender
diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs
index 1f10d8099d836b55fab2b13a60dcc2faf0f37320..323af1bc8305c38bf7a34b4690b212954b7b0864 100644
--- a/substrate/client/consensus/beefy/src/lib.rs
+++ b/substrate/client/consensus/beefy/src/lib.rs
@@ -31,7 +31,7 @@ use crate::{
 	import::BeefyBlockImport,
 	metrics::register_metrics,
 };
-use futures::{stream::Fuse, StreamExt};
+use futures::{stream::Fuse, FutureExt, StreamExt};
 use log::{debug, error, info, warn};
 use parking_lot::Mutex;
 use prometheus::Registry;
@@ -40,17 +40,21 @@ use sc_consensus::BlockImport;
 use sc_network::{NetworkRequest, NotificationService, ProtocolName};
 use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing};
 use sp_api::ProvideRuntimeApi;
-use sp_blockchain::{
-	Backend as BlockchainBackend, Error as ClientError, HeaderBackend, Result as ClientResult,
-};
+use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend};
 use sp_consensus::{Error as ConsensusError, SyncOracle};
 use sp_consensus_beefy::{
-	ecdsa_crypto::AuthorityId, BeefyApi, MmrRootHash, PayloadProvider, ValidatorSet,
+	ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, MmrRootHash, PayloadProvider, ValidatorSet,
+	BEEFY_ENGINE_ID,
 };
 use sp_keystore::KeystorePtr;
 use sp_mmr_primitives::MmrApi;
 use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero};
-use std::{collections::BTreeMap, marker::PhantomData, sync::Arc, time::Duration};
+use std::{
+	collections::{BTreeMap, VecDeque},
+	marker::PhantomData,
+	sync::Arc,
+	time::Duration,
+};
 
 mod aux_schema;
 mod error;
@@ -63,9 +67,19 @@ pub mod communication;
 pub mod import;
 pub mod justification;
 
+use crate::{
+	communication::{gossip::GossipValidator, peers::PeerReport},
+	justification::BeefyVersionedFinalityProof,
+	keystore::BeefyKeystore,
+	metrics::VoterMetrics,
+	round::Rounds,
+	worker::{BeefyWorker, PersistedState},
+};
 pub use communication::beefy_protocol_name::{
 	gossip_protocol_name, justifications_protocol_name as justifs_protocol_name,
 };
+use sc_utils::mpsc::TracingUnboundedReceiver;
+use sp_runtime::generic::OpaqueDigestItemId;
 
 #[cfg(test)]
 mod tests;
@@ -209,6 +223,247 @@ pub struct BeefyParams<B: Block, BE, C, N, P, R, S> {
 	/// Handler for incoming BEEFY justifications requests from a remote peer.
 	pub on_demand_justifications_handler: BeefyJustifsRequestHandler<B, C>,
 }
+/// Helper object holding BEEFY worker communication/gossip components.
+///
+/// These are created once, but will be reused if worker is restarted/reinitialized.
+pub(crate) struct BeefyComms<B: Block, N> {
+	pub gossip_engine: GossipEngine<B>,
+	pub gossip_validator: Arc<GossipValidator<B>>,
+	pub gossip_report_stream: TracingUnboundedReceiver<PeerReport>,
+	pub on_demand_justifications: OnDemandJustificationsEngine<B, N>,
+}
+
+/// Helper builder object for building [worker::BeefyWorker].
+///
+/// It has to do it in two steps: initialization and build, because the first step can sleep waiting
+/// for certain chain and backend conditions, and while sleeping we still need to pump the
+/// GossipEngine. Once initialization is done, the GossipEngine (and other pieces) are added to get
+/// the complete [worker::BeefyWorker] object.
+pub(crate) struct BeefyWorkerBuilder<B: Block, BE, R> {
+	// utilities
+	backend: Arc<BE>,
+	runtime: Arc<R>,
+	key_store: BeefyKeystore<AuthorityId>,
+	// voter metrics
+	metrics: Option<VoterMetrics>,
+	persisted_state: PersistedState<B>,
+}
+
+impl<B, BE, R> BeefyWorkerBuilder<B, BE, R>
+where
+	B: Block + codec::Codec,
+	BE: Backend<B>,
+	R: ProvideRuntimeApi<B>,
+	R::Api: BeefyApi<B, AuthorityId>,
+{
+	/// This will wait for the chain to enable BEEFY (if not yet enabled) and also wait for the
+	/// backend to sync all headers required by the voter to build a contiguous chain of mandatory
+	/// justifications. Then it builds the initial voter state using a combination of previously
+	/// persisted state in AUX DB and latest chain information/progress.
+	///
+	/// Returns a sane `BeefyWorkerBuilder` that can build the `BeefyWorker`.
+	pub async fn async_initialize(
+		backend: Arc<BE>,
+		runtime: Arc<R>,
+		key_store: BeefyKeystore<AuthorityId>,
+		metrics: Option<VoterMetrics>,
+		min_block_delta: u32,
+		gossip_validator: Arc<GossipValidator<B>>,
+		finality_notifications: &mut Fuse<FinalityNotifications<B>>,
+	) -> Result<Self, Error> {
+		// Wait for BEEFY pallet to be active before starting voter.
+		let (beefy_genesis, best_grandpa) =
+			wait_for_runtime_pallet(&*runtime, finality_notifications).await?;
+
+		let persisted_state = Self::load_or_init_state(
+			beefy_genesis,
+			best_grandpa,
+			min_block_delta,
+			backend.clone(),
+			runtime.clone(),
+			&key_store,
+			&metrics,
+		)
+		.await?;
+		// Update the gossip validator with the right starting round and set id.
+		persisted_state
+			.gossip_filter_config()
+			.map(|f| gossip_validator.update_filter(f))?;
+
+		Ok(BeefyWorkerBuilder { backend, runtime, key_store, metrics, persisted_state })
+	}
+
+	/// Takes rest of missing pieces as params and builds the `BeefyWorker`.
+	pub fn build<P, S, N>(
+		self,
+		payload_provider: P,
+		sync: Arc<S>,
+		comms: BeefyComms<B, N>,
+		links: BeefyVoterLinks<B>,
+		pending_justifications: BTreeMap<NumberFor<B>, BeefyVersionedFinalityProof<B>>,
+	) -> BeefyWorker<B, BE, P, R, S, N> {
+		BeefyWorker {
+			backend: self.backend,
+			runtime: self.runtime,
+			key_store: self.key_store,
+			metrics: self.metrics,
+			persisted_state: self.persisted_state,
+			payload_provider,
+			sync,
+			comms,
+			links,
+			pending_justifications,
+		}
+	}
+
+	// If no persisted state present, walk back the chain from first GRANDPA notification to either:
+	//  - latest BEEFY finalized block, or if none found on the way,
+	//  - BEEFY pallet genesis;
+	// Enqueue any BEEFY mandatory blocks (session boundaries) found on the way, for voter to
+	// finalize.
+	async fn init_state(
+		beefy_genesis: NumberFor<B>,
+		best_grandpa: <B as Block>::Header,
+		min_block_delta: u32,
+		backend: Arc<BE>,
+		runtime: Arc<R>,
+	) -> Result<PersistedState<B>, Error> {
+		let blockchain = backend.blockchain();
+
+		let beefy_genesis = runtime
+			.runtime_api()
+			.beefy_genesis(best_grandpa.hash())
+			.ok()
+			.flatten()
+			.filter(|genesis| *genesis == beefy_genesis)
+			.ok_or_else(|| Error::Backend("BEEFY pallet expected to be active.".into()))?;
+		// Walk back the imported blocks and initialize voter either, at the last block with
+		// a BEEFY justification, or at pallet genesis block; voter will resume from there.
+		let mut sessions = VecDeque::new();
+		let mut header = best_grandpa.clone();
+		let state = loop {
+			if let Some(true) = blockchain
+				.justifications(header.hash())
+				.ok()
+				.flatten()
+				.map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some())
+			{
+				debug!(
+					target: LOG_TARGET,
+					"🥩 Initialize BEEFY voter at last BEEFY finalized block: {:?}.",
+					*header.number()
+				);
+				let best_beefy = *header.number();
+				// If no session boundaries detected so far, just initialize new rounds here.
+				if sessions.is_empty() {
+					let active_set =
+						expect_validator_set(runtime.as_ref(), backend.as_ref(), &header).await?;
+					let mut rounds = Rounds::new(best_beefy, active_set);
+					// Mark the round as already finalized.
+					rounds.conclude(best_beefy);
+					sessions.push_front(rounds);
+				}
+				let state = PersistedState::checked_new(
+					best_grandpa,
+					best_beefy,
+					sessions,
+					min_block_delta,
+					beefy_genesis,
+				)
+				.ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?;
+				break state
+			}
+
+			if *header.number() == beefy_genesis {
+				// We've reached BEEFY genesis, initialize voter here.
+				let genesis_set =
+					expect_validator_set(runtime.as_ref(), backend.as_ref(), &header).await?;
+				info!(
+					target: LOG_TARGET,
+					"🥩 Loading BEEFY voter state from genesis on what appears to be first startup. \
+					Starting voting rounds at block {:?}, genesis validator set {:?}.",
+					beefy_genesis,
+					genesis_set,
+				);
+
+				sessions.push_front(Rounds::new(beefy_genesis, genesis_set));
+				break PersistedState::checked_new(
+					best_grandpa,
+					Zero::zero(),
+					sessions,
+					min_block_delta,
+					beefy_genesis,
+				)
+				.ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?
+			}
+
+			if let Some(active) = find_authorities_change::<B>(&header) {
+				debug!(
+					target: LOG_TARGET,
+					"🥩 Marking block {:?} as BEEFY Mandatory.",
+					*header.number()
+				);
+				sessions.push_front(Rounds::new(*header.number(), active));
+			}
+
+			// Move up the chain.
+			header = wait_for_parent_header(blockchain, header, HEADER_SYNC_DELAY).await?;
+		};
+
+		aux_schema::write_current_version(backend.as_ref())?;
+		aux_schema::write_voter_state(backend.as_ref(), &state)?;
+		Ok(state)
+	}
+
+	async fn load_or_init_state(
+		beefy_genesis: NumberFor<B>,
+		best_grandpa: <B as Block>::Header,
+		min_block_delta: u32,
+		backend: Arc<BE>,
+		runtime: Arc<R>,
+		key_store: &BeefyKeystore<AuthorityId>,
+		metrics: &Option<VoterMetrics>,
+	) -> Result<PersistedState<B>, Error> {
+		// Initialize voter state from AUX DB if compatible.
+		if let Some(mut state) = crate::aux_schema::load_persistent(backend.as_ref())?
+			// Verify state pallet genesis matches runtime.
+			.filter(|state| state.pallet_genesis() == beefy_genesis)
+		{
+			// Overwrite persisted state with current best GRANDPA block.
+			state.set_best_grandpa(best_grandpa.clone());
+			// Overwrite persisted data with newly provided `min_block_delta`.
+			state.set_min_block_delta(min_block_delta);
+			debug!(target: LOG_TARGET, "🥩 Loading BEEFY voter state from db: {:?}.", state);
+
+			// Make sure that all the headers that we need have been synced.
+			let mut new_sessions = vec![];
+			let mut header = best_grandpa.clone();
+			while *header.number() > state.best_beefy() {
+				if state.voting_oracle().can_add_session(*header.number()) {
+					if let Some(active) = find_authorities_change::<B>(&header) {
+						new_sessions.push((active, *header.number()));
+					}
+				}
+				header =
+					wait_for_parent_header(backend.blockchain(), header, HEADER_SYNC_DELAY).await?;
+			}
+
+			// Make sure we didn't miss any sessions during node restart.
+ for (validator_set, new_session_start) in new_sessions.drain(..).rev() { + debug!( + target: LOG_TARGET, + "🥩 Handling missed BEEFY session after node restart: {:?}.", + new_session_start + ); + state.init_session_at(new_session_start, validator_set, key_store, metrics); + } + return Ok(state) + } + + // No valid voter-state persisted, re-initialize from pallet genesis. + Self::init_state(beefy_genesis, best_grandpa, min_block_delta, backend, runtime).await + } +} /// Start the BEEFY gadget. /// @@ -277,7 +532,7 @@ pub async fn start_beefy_gadget( known_peers, prometheus_registry.clone(), ); - let mut beefy_comms = worker::BeefyComms { + let mut beefy_comms = BeefyComms { gossip_engine, gossip_validator, gossip_report_stream, @@ -287,57 +542,45 @@ pub async fn start_beefy_gadget( // We re-create and re-run the worker in this loop in order to quickly reinit and resume after // select recoverable errors. loop { - // Wait for BEEFY pallet to be active before starting voter. - let (beefy_genesis, best_grandpa) = match wait_for_runtime_pallet( - &*runtime, - &mut beefy_comms.gossip_engine, - &mut finality_notifications, - ) - .await - { - Ok(res) => res, - Err(e) => { - error!(target: LOG_TARGET, "Error: {:?}. Terminating.", e); - return - }, - }; - - let mut worker_base = worker::BeefyWorkerBase { - backend: backend.clone(), - runtime: runtime.clone(), - key_store: key_store.clone().into(), - metrics: metrics.clone(), - _phantom: Default::default(), - }; - - let persisted_state = match worker_base - .load_or_init_state(beefy_genesis, best_grandpa, min_block_delta) - .await - { - Ok(state) => state, - Err(e) => { - error!(target: LOG_TARGET, "Error: {:?}. Terminating.", e); - return - }, + // Make sure to pump gossip engine while waiting for initialization conditions. + let worker_builder = loop { + futures::select! { + builder_init_result = BeefyWorkerBuilder::async_initialize( + backend.clone(), + runtime.clone(), + key_store.clone().into(), + metrics.clone(), + min_block_delta, + beefy_comms.gossip_validator.clone(), + &mut finality_notifications, + ).fuse() => { + match builder_init_result { + Ok(builder) => break builder, + Err(e) => { + error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", e); + return + }, + } + }, + // Pump peer reports + _ = &mut beefy_comms.gossip_report_stream.next() => { + continue + }, + // Pump gossip engine. + _ = &mut beefy_comms.gossip_engine => { + error!(target: LOG_TARGET, "🥩 Gossip engine has unexpectedly terminated."); + return + } + } }; - // Update the gossip validator with the right starting round and set id. - if let Err(e) = persisted_state - .gossip_filter_config() - .map(|f| beefy_comms.gossip_validator.update_filter(f)) - { - error!(target: LOG_TARGET, "Error: {:?}. Terminating.", e); - return - } - let worker = worker::BeefyWorker { - base: worker_base, - payload_provider: payload_provider.clone(), - sync: sync.clone(), - comms: beefy_comms, - links: links.clone(), - pending_justifications: BTreeMap::new(), - persisted_state, - }; + let worker = worker_builder.build( + payload_provider.clone(), + sync.clone(), + beefy_comms, + links.clone(), + BTreeMap::new(), + ); match futures::future::select( Box::pin(worker.run(&mut block_import_justif, &mut finality_notifications)), @@ -404,9 +647,8 @@ where /// Should be called only once during worker initialization. 
 async fn wait_for_runtime_pallet<B, R>(
 	runtime: &R,
-	mut gossip_engine: &mut GossipEngine<B>,
 	finality: &mut Fuse<FinalityNotifications<B>>,
-) -> ClientResult<(NumberFor<B>, <B as Block>::Header)>
+) -> Result<(NumberFor<B>, <B as Block>::Header), Error>
 where
 	B: Block,
 	R: ProvideRuntimeApi<B>,
@@ -414,33 +656,24 @@ where
 {
 	info!(target: LOG_TARGET, "🥩 BEEFY gadget waiting for BEEFY pallet to become available...");
 	loop {
-		futures::select! {
-			notif = finality.next() => {
-				let notif = match notif {
-					Some(notif) => notif,
-					None => break
-				};
-				let at = notif.header.hash();
-				if let Some(start) = runtime.runtime_api().beefy_genesis(at).ok().flatten() {
-					if *notif.header.number() >= start {
-						// Beefy pallet available, return header for best grandpa at the time.
-						info!(
-							target: LOG_TARGET,
-							"🥩 BEEFY pallet available: block {:?} beefy genesis {:?}",
-							notif.header.number(), start
-						);
-						return Ok((start, notif.header))
-					}
-				}
-			},
-			_ = gossip_engine => {
-				break
+		let notif = finality.next().await.ok_or_else(|| {
+			let err_msg = "🥩 Finality stream has unexpectedly terminated.".into();
+			error!(target: LOG_TARGET, "{}", err_msg);
+			Error::Backend(err_msg)
+		})?;
+		let at = notif.header.hash();
+		if let Some(start) = runtime.runtime_api().beefy_genesis(at).ok().flatten() {
+			if *notif.header.number() >= start {
+				// Beefy pallet available, return header for best grandpa at the time.
+				info!(
+					target: LOG_TARGET,
+					"🥩 BEEFY pallet available: block {:?} beefy genesis {:?}",
+					notif.header.number(), start
+				);
+				return Ok((start, notif.header))
 			}
 		}
 	}
-	let err_msg = "🥩 Gossip engine has unexpectedly terminated.".into();
-	error!(target: LOG_TARGET, "{}", err_msg);
-	Err(ClientError::Backend(err_msg))
 }
 
 /// Provides validator set active `at_header`. It tries to get it from state, otherwise falls
@@ -460,17 +693,21 @@ where
 	R::Api: BeefyApi<B, AuthorityId>,
 {
 	let blockchain = backend.blockchain();
-
 	// Walk up the chain looking for the validator set active at 'at_header'. Process both state and
 	// header digests.
-	debug!(target: LOG_TARGET, "🥩 Trying to find validator set active at header: {:?}", at_header);
+	debug!(
+		target: LOG_TARGET,
+		"🥩 Trying to find validator set active at header(number {:?}, hash {:?})",
+		at_header.number(),
+		at_header.hash()
+	);
 	let mut header = at_header.clone();
 	loop {
 		debug!(target: LOG_TARGET, "🥩 Looking for auth set change at block number: {:?}", *header.number());
 		if let Ok(Some(active)) = runtime.runtime_api().validator_set(header.hash()) {
 			return Ok(active)
 		} else {
-			match worker::find_authorities_change::<B>(&header) {
+			match find_authorities_change::<B>(&header) {
 				Some(active) => return Ok(active),
 				// Move up the chain. Ultimately we'll get it from chain genesis state, or error out
 				// there.
@@ -482,3 +719,18 @@ where
 		}
 	}
 }
+
+/// Scan the `header` digest log for a BEEFY validator set change. Return either the new
+/// validator set or `None` in case no validator set change has been signaled.
+pub(crate) fn find_authorities_change<B>(header: &B::Header) -> Option<ValidatorSet<AuthorityId>>
+where
+	B: Block,
+{
+	let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID);
+
+	let filter = |log: ConsensusLog<AuthorityId>| match log {
+		ConsensusLog::AuthoritiesChange(validator_set) => Some(validator_set),
+		_ => None,
+	};
+	header.digest().convert_first(|l| l.try_to(id).and_then(filter))
+}
diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs
index 0e6ff210b158c18334a749992d096e2d54c77a01..d106c9dcd88165f929085972483e090dbf78c559 100644
--- a/substrate/client/consensus/beefy/src/tests.rs
+++ b/substrate/client/consensus/beefy/src/tests.rs
@@ -32,8 +32,8 @@ use crate::{
 	gossip_protocol_name,
 	justification::*,
 	wait_for_runtime_pallet,
-	worker::{BeefyWorkerBase, PersistedState},
-	BeefyRPCLinks, BeefyVoterLinks, KnownPeers,
+	worker::PersistedState,
+	BeefyRPCLinks, BeefyVoterLinks, BeefyWorkerBuilder, KnownPeers,
 };
 use futures::{future, stream::FuturesUnordered, Future, FutureExt, StreamExt};
 use parking_lot::Mutex;
@@ -368,27 +368,19 @@ async fn voter_init_setup(
 	api: &TestApi,
 ) -> Result<PersistedState<Block>, Error> {
 	let backend = net.peer(0).client().as_backend();
-	let known_peers = Arc::new(Mutex::new(KnownPeers::new()));
-	let (gossip_validator, _) = GossipValidator::new(known_peers);
-	let gossip_validator = Arc::new(gossip_validator);
-	let mut gossip_engine = sc_network_gossip::GossipEngine::new(
-		net.peer(0).network_service().clone(),
-		net.peer(0).sync_service().clone(),
-		net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(),
-		"/beefy/whatever",
-		gossip_validator,
-		None,
-	);
-	let (beefy_genesis, best_grandpa) =
-		wait_for_runtime_pallet(api, &mut gossip_engine, finality).await.unwrap();
-	let mut worker_base = BeefyWorkerBase {
+	let (beefy_genesis, best_grandpa) = wait_for_runtime_pallet(api, finality).await.unwrap();
+	let key_store = None.into();
+	let metrics = None;
+	BeefyWorkerBuilder::load_or_init_state(
+		beefy_genesis,
+		best_grandpa,
+		1,
 		backend,
-		runtime: Arc::new(api.clone()),
-		key_store: None.into(),
-		metrics: None,
-		_phantom: Default::default(),
-	};
-	worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await
+		Arc::new(api.clone()),
+		&key_store,
+		&metrics,
+	)
+	.await
 }
 
 // Spawns beefy voters. Returns a future to spawn on the runtime.
@@ -1065,32 +1057,7 @@ async fn should_initialize_voter_at_custom_genesis() { net.peer(0).client().as_client().finalize_block(hashes[8], None).unwrap(); // load persistent state - nothing in DB, should init at genesis - // - // NOTE: code from `voter_init_setup()` is moved here because the new network event system - // doesn't allow creating a new `GossipEngine` as the notification handle is consumed by the - // first `GossipEngine` - let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let (gossip_validator, _) = GossipValidator::new(known_peers); - let gossip_validator = Arc::new(gossip_validator); - let mut gossip_engine = sc_network_gossip::GossipEngine::new( - net.peer(0).network_service().clone(), - net.peer(0).sync_service().clone(), - net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), - "/beefy/whatever", - gossip_validator, - None, - ); - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { - backend: backend.clone(), - runtime: Arc::new(api), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - let persisted_state = - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await.unwrap(); + let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); // Test initialization at session boundary. // verify voter initialized with single session starting at block `custom_pallet_genesis` (7) @@ -1120,18 +1087,7 @@ async fn should_initialize_voter_at_custom_genesis() { net.peer(0).client().as_client().finalize_block(hashes[10], None).unwrap(); // load persistent state - state preset in DB, but with different pallet genesis - // the network state persists and uses the old `GossipEngine` initialized for `peer(0)` - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { - backend: backend.clone(), - runtime: Arc::new(api), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - let new_persisted_state = - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await.unwrap(); + let new_persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); // verify voter initialized with single session starting at block `new_pallet_genesis` (10) let sessions = new_persisted_state.voting_oracle().sessions(); @@ -1322,32 +1278,7 @@ async fn should_catch_up_when_loading_saved_voter_state() { let api = TestApi::with_validator_set(&validator_set); // load persistent state - nothing in DB, should init at genesis - // - // NOTE: code from `voter_init_setup()` is moved here because the new network event system - // doesn't allow creating a new `GossipEngine` as the notification handle is consumed by the - // first `GossipEngine` - let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let (gossip_validator, _) = GossipValidator::new(known_peers); - let gossip_validator = Arc::new(gossip_validator); - let mut gossip_engine = sc_network_gossip::GossipEngine::new( - net.peer(0).network_service().clone(), - net.peer(0).sync_service().clone(), - net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), - "/beefy/whatever", - gossip_validator, - None, - ); - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { - backend: 
backend.clone(), - runtime: Arc::new(api.clone()), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - let persisted_state = - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await.unwrap(); + let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); // Test initialization at session boundary. // verify voter initialized with two sessions starting at blocks 1 and 10 @@ -1374,18 +1305,7 @@ async fn should_catch_up_when_loading_saved_voter_state() { // finalize 25 without justifications net.peer(0).client().as_client().finalize_block(hashes[25], None).unwrap(); // load persistent state - state preset in DB - // the network state persists and uses the old `GossipEngine` initialized for `peer(0)` - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { - backend: backend.clone(), - runtime: Arc::new(api), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - let persisted_state = - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await.unwrap(); + let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); // Verify voter initialized with old sessions plus a new one starting at block 20. // There shouldn't be any duplicates. diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 19a24b9bc1a548936483537f7ea057cba1592d8c..c8eb19621ba5a6e45ea2be93e1e2e830492a9b81 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -17,46 +17,42 @@ // along with this program. If not, see . use crate::{ - aux_schema, communication::{ - gossip::{proofs_topic, votes_topic, GossipFilterCfg, GossipMessage, GossipValidator}, + gossip::{proofs_topic, votes_topic, GossipFilterCfg, GossipMessage}, peers::PeerReport, - request_response::outgoing_requests_engine::{OnDemandJustificationsEngine, ResponseInfo}, + request_response::outgoing_requests_engine::ResponseInfo, }, error::Error, - expect_validator_set, + find_authorities_change, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metric_inc, metric_set, metrics::VoterMetrics, round::{Rounds, VoteImportResult}, - wait_for_parent_header, BeefyVoterLinks, HEADER_SYNC_DELAY, LOG_TARGET, + BeefyComms, BeefyVoterLinks, LOG_TARGET, }; use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; -use sc_network_gossip::GossipEngine; -use sc_utils::{mpsc::TracingUnboundedReceiver, notification::NotificationReceiver}; +use sc_utils::notification::NotificationReceiver; use sp_api::ProvideRuntimeApi; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; -use sp_blockchain::Backend as BlockchainBackend; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ check_equivocation_proof, ecdsa_crypto::{AuthorityId, Signature}, - BeefyApi, BeefySignatureHasher, Commitment, ConsensusLog, EquivocationProof, PayloadProvider, - ValidatorSet, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, + BeefyApi, BeefySignatureHasher, Commitment, EquivocationProof, PayloadProvider, ValidatorSet, + VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, + generic::BlockId, 
traits::{Block, Header, NumberFor, Zero}, SaturatedConversion, }; use std::{ collections::{BTreeMap, BTreeSet, VecDeque}, fmt::Debug, - marker::PhantomData, sync::Arc, }; @@ -180,8 +176,8 @@ impl VoterOracle { } } - // Check if an observed session can be added to the Oracle. - fn can_add_session(&self, session_start: NumberFor) -> bool { + /// Check if an observed session can be added to the Oracle. + pub fn can_add_session(&self, session_start: NumberFor) -> bool { let latest_known_session_start = self.sessions.back().map(|session| session.session_start()); Some(session_start) > latest_known_session_start @@ -319,229 +315,28 @@ impl PersistedState { self.voting_oracle.best_grandpa_block_header = best_grandpa; } + pub fn voting_oracle(&self) -> &VoterOracle { + &self.voting_oracle + } + pub(crate) fn gossip_filter_config(&self) -> Result, Error> { let (start, end) = self.voting_oracle.accepted_interval()?; let validator_set = self.voting_oracle.current_validator_set()?; Ok(GossipFilterCfg { start, end, validator_set }) } -} - -/// Helper object holding BEEFY worker communication/gossip components. -/// -/// These are created once, but will be reused if worker is restarted/reinitialized. -pub(crate) struct BeefyComms { - pub gossip_engine: GossipEngine, - pub gossip_validator: Arc>, - pub gossip_report_stream: TracingUnboundedReceiver, - pub on_demand_justifications: OnDemandJustificationsEngine, -} - -pub(crate) struct BeefyWorkerBase { - // utilities - pub backend: Arc, - pub runtime: Arc, - pub key_store: BeefyKeystore, - - /// BEEFY client metrics. - pub metrics: Option, - - pub _phantom: PhantomData, -} - -impl BeefyWorkerBase -where - B: Block + Codec, - BE: Backend, - R: ProvideRuntimeApi, - R::Api: BeefyApi, -{ - // If no persisted state present, walk back the chain from first GRANDPA notification to either: - // - latest BEEFY finalized block, or if none found on the way, - // - BEEFY pallet genesis; - // Enqueue any BEEFY mandatory blocks (session boundaries) found on the way, for voter to - // finalize. - async fn init_state( - &self, - beefy_genesis: NumberFor, - best_grandpa: ::Header, - min_block_delta: u32, - ) -> Result, Error> { - let blockchain = self.backend.blockchain(); - - let beefy_genesis = self - .runtime - .runtime_api() - .beefy_genesis(best_grandpa.hash()) - .ok() - .flatten() - .filter(|genesis| *genesis == beefy_genesis) - .ok_or_else(|| Error::Backend("BEEFY pallet expected to be active.".into()))?; - // Walk back the imported blocks and initialize voter either, at the last block with - // a BEEFY justification, or at pallet genesis block; voter will resume from there. - let mut sessions = VecDeque::new(); - let mut header = best_grandpa.clone(); - let state = loop { - if let Some(true) = blockchain - .justifications(header.hash()) - .ok() - .flatten() - .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) - { - info!( - target: LOG_TARGET, - "🥩 Initialize BEEFY voter at last BEEFY finalized block: {:?}.", - *header.number() - ); - let best_beefy = *header.number(); - // If no session boundaries detected so far, just initialize new rounds here. - if sessions.is_empty() { - let active_set = - expect_validator_set(self.runtime.as_ref(), self.backend.as_ref(), &header) - .await?; - let mut rounds = Rounds::new(best_beefy, active_set); - // Mark the round as already finalized. 
- rounds.conclude(best_beefy); - sessions.push_front(rounds); - } - let state = PersistedState::checked_new( - best_grandpa, - best_beefy, - sessions, - min_block_delta, - beefy_genesis, - ) - .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; - break state - } - - if *header.number() == beefy_genesis { - // We've reached BEEFY genesis, initialize voter here. - let genesis_set = - expect_validator_set(self.runtime.as_ref(), self.backend.as_ref(), &header) - .await?; - info!( - target: LOG_TARGET, - "🥩 Loading BEEFY voter state from genesis on what appears to be first startup. \ - Starting voting rounds at block {:?}, genesis validator set {:?}.", - beefy_genesis, - genesis_set, - ); - - sessions.push_front(Rounds::new(beefy_genesis, genesis_set)); - break PersistedState::checked_new( - best_grandpa, - Zero::zero(), - sessions, - min_block_delta, - beefy_genesis, - ) - .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))? - } - - if let Some(active) = find_authorities_change::(&header) { - info!( - target: LOG_TARGET, - "🥩 Marking block {:?} as BEEFY Mandatory.", - *header.number() - ); - sessions.push_front(Rounds::new(*header.number(), active)); - } - - // Move up the chain. - header = wait_for_parent_header(blockchain, header, HEADER_SYNC_DELAY).await?; - }; - - aux_schema::write_current_version(self.backend.as_ref())?; - aux_schema::write_voter_state(self.backend.as_ref(), &state)?; - Ok(state) - } - - pub async fn load_or_init_state( - &mut self, - beefy_genesis: NumberFor, - best_grandpa: ::Header, - min_block_delta: u32, - ) -> Result, Error> { - // Initialize voter state from AUX DB if compatible. - if let Some(mut state) = crate::aux_schema::load_persistent(self.backend.as_ref())? - // Verify state pallet genesis matches runtime. - .filter(|state| state.pallet_genesis() == beefy_genesis) - { - // Overwrite persisted state with current best GRANDPA block. - state.set_best_grandpa(best_grandpa.clone()); - // Overwrite persisted data with newly provided `min_block_delta`. - state.set_min_block_delta(min_block_delta); - info!(target: LOG_TARGET, "🥩 Loading BEEFY voter state from db: {:?}.", state); - - // Make sure that all the headers that we need have been synced. - let mut new_sessions = vec![]; - let mut header = best_grandpa.clone(); - while *header.number() > state.best_beefy() { - if state.voting_oracle.can_add_session(*header.number()) { - if let Some(active) = find_authorities_change::(&header) { - new_sessions.push((active, *header.number())); - } - } - header = - wait_for_parent_header(self.backend.blockchain(), header, HEADER_SYNC_DELAY) - .await?; - } - - // Make sure we didn't miss any sessions during node restart. - for (validator_set, new_session_start) in new_sessions.drain(..).rev() { - info!( - target: LOG_TARGET, - "🥩 Handling missed BEEFY session after node restart: {:?}.", - new_session_start - ); - self.init_session_at(&mut state, validator_set, new_session_start); - } - return Ok(state) - } - - // No valid voter-state persisted, re-initialize from pallet genesis. - self.init_state(beefy_genesis, best_grandpa, min_block_delta).await - } - - /// Verify `active` validator set for `block` against the key store - /// - /// We want to make sure that we have _at least one_ key in our keystore that - /// is part of the validator set, that's because if there are no local keys - /// then we can't perform our job as a validator. - /// - /// Note that for a non-authority node there will be no keystore, and we will - /// return an error and don't check. 
The error can usually be ignored. - fn verify_validator_set( - &self, - block: &NumberFor, - active: &ValidatorSet, - ) -> Result<(), Error> { - let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); - - let public_keys = self.key_store.public_keys()?; - let store: BTreeSet<&AuthorityId> = public_keys.iter().collect(); - - if store.intersection(&active).count() == 0 { - let msg = "no authority public key found in store".to_string(); - debug!(target: LOG_TARGET, "🥩 for block {:?} {}", block, msg); - metric_inc!(self.metrics, beefy_no_authority_found_in_store); - Err(Error::Keystore(msg)) - } else { - Ok(()) - } - } /// Handle session changes by starting new voting round for mandatory blocks. - fn init_session_at( + pub fn init_session_at( &mut self, - persisted_state: &mut PersistedState, - validator_set: ValidatorSet, new_session_start: NumberFor, + validator_set: ValidatorSet, + key_store: &BeefyKeystore, + metrics: &Option, ) { debug!(target: LOG_TARGET, "🥩 New active validator set: {:?}", validator_set); // BEEFY should finalize a mandatory block during each session. - if let Ok(active_session) = persisted_state.voting_oracle.active_rounds() { + if let Ok(active_session) = self.voting_oracle.active_rounds() { if !active_session.mandatory_done() { debug!( target: LOG_TARGET, @@ -549,20 +344,20 @@ where validator_set.id(), active_session.validator_set_id(), ); - metric_inc!(self.metrics, beefy_lagging_sessions); + metric_inc!(metrics, beefy_lagging_sessions); } } if log_enabled!(target: LOG_TARGET, log::Level::Debug) { // verify the new validator set - only do it if we're also logging the warning - let _ = self.verify_validator_set(&new_session_start, &validator_set); + if verify_validator_set::(&new_session_start, &validator_set, key_store).is_err() { + metric_inc!(metrics, beefy_no_authority_found_in_store); + } } let id = validator_set.id(); - persisted_state - .voting_oracle - .add_session(Rounds::new(new_session_start, validator_set)); - metric_set!(self.metrics, beefy_validator_set_id, id); + self.voting_oracle.add_session(Rounds::new(new_session_start, validator_set)); + metric_set!(metrics, beefy_validator_set_id, id); info!( target: LOG_TARGET, "🥩 New Rounds for validator set id: {:?} with session_start {:?}", @@ -574,9 +369,10 @@ where /// A BEEFY worker/voter that follows the BEEFY protocol pub(crate) struct BeefyWorker { - pub base: BeefyWorkerBase, - - // utils + // utilities + pub backend: Arc, + pub runtime: Arc, + pub key_store: BeefyKeystore, pub payload_provider: P, pub sync: Arc, @@ -592,6 +388,8 @@ pub(crate) struct BeefyWorker { pub pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, /// Persisted voter state. 
pub persisted_state: PersistedState, + /// BEEFY voter metrics + pub metrics: Option, } impl BeefyWorker @@ -622,24 +420,28 @@ where validator_set: ValidatorSet, new_session_start: NumberFor, ) { - self.base - .init_session_at(&mut self.persisted_state, validator_set, new_session_start); + self.persisted_state.init_session_at( + new_session_start, + validator_set, + &self.key_store, + &self.metrics, + ); } fn handle_finality_notification( &mut self, notification: &FinalityNotification, ) -> Result<(), Error> { + let header = ¬ification.header; debug!( target: LOG_TARGET, - "🥩 Finality notification: header {:?} tree_route {:?}", - notification.header, + "🥩 Finality notification: header(number {:?}, hash {:?}) tree_route {:?}", + header.number(), + header.hash(), notification.tree_route, ); - let header = ¬ification.header; - self.base - .runtime + self.runtime .runtime_api() .beefy_genesis(header.hash()) .ok() @@ -653,7 +455,7 @@ where self.persisted_state.set_best_grandpa(header.clone()); // Check all (newly) finalized blocks for new session(s). - let backend = self.base.backend.clone(); + let backend = self.backend.clone(); for header in notification .tree_route .iter() @@ -672,7 +474,7 @@ where } if new_session_added { - crate::aux_schema::write_voter_state(&*self.base.backend, &self.persisted_state) + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) .map_err(|e| Error::Backend(e.to_string()))?; } @@ -706,7 +508,7 @@ where true, ); }, - RoundAction::Drop => metric_inc!(self.base.metrics, beefy_stale_votes), + RoundAction::Drop => metric_inc!(self.metrics, beefy_stale_votes), RoundAction::Enqueue => error!(target: LOG_TARGET, "🥩 unexpected vote: {:?}.", vote), }; Ok(()) @@ -726,23 +528,23 @@ where match self.voting_oracle().triage_round(block_num)? { RoundAction::Process => { debug!(target: LOG_TARGET, "🥩 Process justification for round: {:?}.", block_num); - metric_inc!(self.base.metrics, beefy_imported_justifications); + metric_inc!(self.metrics, beefy_imported_justifications); self.finalize(justification)? }, RoundAction::Enqueue => { debug!(target: LOG_TARGET, "🥩 Buffer justification for round: {:?}.", block_num); if self.pending_justifications.len() < MAX_BUFFERED_JUSTIFICATIONS { self.pending_justifications.entry(block_num).or_insert(justification); - metric_inc!(self.base.metrics, beefy_buffered_justifications); + metric_inc!(self.metrics, beefy_buffered_justifications); } else { - metric_inc!(self.base.metrics, beefy_buffered_justifications_dropped); + metric_inc!(self.metrics, beefy_buffered_justifications_dropped); warn!( target: LOG_TARGET, "🥩 Buffer justification dropped for round: {:?}.", block_num ); } }, - RoundAction::Drop => metric_inc!(self.base.metrics, beefy_stale_justifications), + RoundAction::Drop => metric_inc!(self.metrics, beefy_stale_justifications), }; Ok(()) } @@ -757,14 +559,14 @@ where match rounds.add_vote(vote) { VoteImportResult::RoundConcluded(signed_commitment) => { let finality_proof = VersionedFinalityProof::V1(signed_commitment); - info!( + debug!( target: LOG_TARGET, "🥩 Round #{} concluded, finality_proof: {:?}.", block_number, finality_proof ); // We created the `finality_proof` and know to be valid. // New state is persisted after finalization. 
self.finalize(finality_proof.clone())?; - metric_inc!(self.base.metrics, beefy_good_votes_processed); + metric_inc!(self.metrics, beefy_good_votes_processed); return Ok(Some(finality_proof)) }, VoteImportResult::Ok => { @@ -775,20 +577,17 @@ where .map(|(mandatory_num, _)| mandatory_num == block_number) .unwrap_or(false) { - crate::aux_schema::write_voter_state( - &*self.base.backend, - &self.persisted_state, - ) - .map_err(|e| Error::Backend(e.to_string()))?; + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) + .map_err(|e| Error::Backend(e.to_string()))?; } - metric_inc!(self.base.metrics, beefy_good_votes_processed); + metric_inc!(self.metrics, beefy_good_votes_processed); }, VoteImportResult::Equivocation(proof) => { - metric_inc!(self.base.metrics, beefy_equivocation_votes); + metric_inc!(self.metrics, beefy_equivocation_votes); self.report_equivocation(proof)?; }, - VoteImportResult::Invalid => metric_inc!(self.base.metrics, beefy_invalid_votes), - VoteImportResult::Stale => metric_inc!(self.base.metrics, beefy_stale_votes), + VoteImportResult::Invalid => metric_inc!(self.metrics, beefy_invalid_votes), + VoteImportResult::Stale => metric_inc!(self.metrics, beefy_stale_votes), }; Ok(None) } @@ -815,15 +614,14 @@ where // Set new best BEEFY block number. self.persisted_state.set_best_beefy(block_num); - crate::aux_schema::write_voter_state(&*self.base.backend, &self.persisted_state) + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) .map_err(|e| Error::Backend(e.to_string()))?; - metric_set!(self.base.metrics, beefy_best_block, block_num); + metric_set!(self.metrics, beefy_best_block, block_num); self.comms.on_demand_justifications.cancel_requests_older_than(block_num); if let Err(e) = self - .base .backend .blockchain() .expect_block_hash_from_id(&BlockId::Number(block_num)) @@ -833,8 +631,7 @@ where .notify(|| Ok::<_, ()>(hash)) .expect("forwards closure result; the closure always returns Ok; qed."); - self.base - .backend + self.backend .append_justification(hash, (BEEFY_ENGINE_ID, finality_proof.encode())) }) { debug!( @@ -871,13 +668,13 @@ where for (num, justification) in justifs_to_process.into_iter() { debug!(target: LOG_TARGET, "🥩 Handle buffered justification for: {:?}.", num); - metric_inc!(self.base.metrics, beefy_imported_justifications); + metric_inc!(self.metrics, beefy_imported_justifications); if let Err(err) = self.finalize(justification) { error!(target: LOG_TARGET, "🥩 Error finalizing block: {}", err); } } metric_set!( - self.base.metrics, + self.metrics, beefy_buffered_justifications, self.pending_justifications.len() ); @@ -889,7 +686,7 @@ where fn try_to_vote(&mut self) -> Result<(), Error> { // Vote if there's now a new vote target. 
if let Some(target) = self.voting_oracle().voting_target() { - metric_set!(self.base.metrics, beefy_should_vote_on, target); + metric_set!(self.metrics, beefy_should_vote_on, target); if target > self.persisted_state.best_voted { self.do_vote(target)?; } @@ -909,7 +706,6 @@ where self.persisted_state.voting_oracle.best_grandpa_block_header.clone() } else { let hash = self - .base .backend .blockchain() .expect_block_hash_from_id(&BlockId::Number(target_number)) @@ -921,7 +717,7 @@ where Error::Backend(err_msg) })?; - self.base.backend.blockchain().expect_header(hash).map_err(|err| { + self.backend.blockchain().expect_header(hash).map_err(|err| { let err_msg = format!( "Couldn't get header for block #{:?} ({:?}) (error: {:?}), skipping vote..", target_number, hash, err @@ -941,7 +737,7 @@ where let rounds = self.persisted_state.voting_oracle.active_rounds_mut()?; let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); - let authority_id = if let Some(id) = self.base.key_store.authority_id(validators) { + let authority_id = if let Some(id) = self.key_store.authority_id(validators) { debug!(target: LOG_TARGET, "🥩 Local authority id: {:?}", id); id } else { @@ -955,7 +751,7 @@ where let commitment = Commitment { payload, block_number: target_number, validator_set_id }; let encoded_commitment = commitment.encode(); - let signature = match self.base.key_store.sign(&authority_id, &encoded_commitment) { + let signature = match self.key_store.sign(&authority_id, &encoded_commitment) { Ok(sig) => sig, Err(err) => { warn!(target: LOG_TARGET, "🥩 Error signing commitment: {:?}", err); @@ -980,7 +776,7 @@ where .gossip_engine .gossip_message(proofs_topic::(), encoded_proof, true); } else { - metric_inc!(self.base.metrics, beefy_votes_sent); + metric_inc!(self.metrics, beefy_votes_sent); debug!(target: LOG_TARGET, "🥩 Sent vote message: {:?}", vote); let encoded_vote = GossipMessage::::Vote(vote).encode(); self.comms.gossip_engine.gossip_message(votes_topic::(), encoded_vote, false); @@ -988,8 +784,8 @@ where // Persist state after vote to avoid double voting in case of voter restarts. 
self.persisted_state.best_voted = target_number; - metric_set!(self.base.metrics, beefy_best_voted, target_number); - crate::aux_schema::write_voter_state(&*self.base.backend, &self.persisted_state) + metric_set!(self.metrics, beefy_best_voted, target_number); + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) .map_err(|e| Error::Backend(e.to_string())) } @@ -1163,16 +959,15 @@ where if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { debug!(target: LOG_TARGET, "🥩 Skip report for bad equivocation {:?}", proof); return Ok(()) - } else if let Some(local_id) = self.base.key_store.authority_id(validators) { + } else if let Some(local_id) = self.key_store.authority_id(validators) { if offender_id == local_id { - debug!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation"); + warn!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation"); return Ok(()) } } let number = *proof.round_number(); let hash = self - .base .backend .blockchain() .expect_block_hash_from_id(&BlockId::Number(number)) .map_err(|err| { let err_msg = format!( ); Error::Backend(err_msg) })?; - let runtime_api = self.base.runtime.runtime_api(); + let runtime_api = self.runtime.runtime_api(); // generate key ownership proof at that block let key_owner_proof = match runtime_api .generate_key_ownership_proof(hash, validator_set_id, offender_id) { ... }; // submit equivocation report at **best** block - let best_block_hash = self.base.backend.blockchain().info().best_hash; + let best_block_hash = self.backend.blockchain().info().best_hash; runtime_api .submit_report_equivocation_unsigned_extrinsic(best_block_hash, proof, key_owner_proof) .map_err(Error::RuntimeApi)?; @@ -1209,21 +1004,6 @@ } } -/// Scan the `header` digest log for a BEEFY validator set change. Return either the new -/// validator set or `None` in case no validator set change has been signaled. -pub(crate) fn find_authorities_change(header: &B::Header) -> Option> -where - B: Block, -{ - let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); - - let filter = |log: ConsensusLog| match log { - ConsensusLog::AuthoritiesChange(validator_set) => Some(validator_set), - _ => None, - }; - header.digest().convert_first(|l| l.try_to(id).and_then(filter)) -} - /// Calculate next block number to vote on. /// /// Return `None` if there is no votable target yet. @@ -1234,7 +1014,7 @@ where // if the mandatory block (session_start) does not have a beefy justification yet, // we vote on it let target = if best_beefy < session_start { - debug!(target: LOG_TARGET, "🥩 vote target - mandatory block: #{:?}", session_start,); + debug!(target: LOG_TARGET, "🥩 vote target - mandatory block: #{:?}", session_start); session_start } else { let diff = best_grandpa.saturating_sub(best_beefy) + 1u32.into(); @@ -1260,11 +1040,42 @@ where } } +/// Verify the `active` validator set for `block` against the key store. +/// +/// We want to make sure that we have _at least one_ key in our keystore that +/// is part of the validator set; that's because if there are no local keys +/// then we can't perform our job as a validator. +/// +/// Note that for a non-authority node there will be no keystore, and we will +/// return an error without performing the check. The error can usually be ignored.
+fn verify_validator_set( + block: &NumberFor, + active: &ValidatorSet, + key_store: &BeefyKeystore, +) -> Result<(), Error> { + let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); + + let public_keys = key_store.public_keys()?; + let store: BTreeSet<&AuthorityId> = public_keys.iter().collect(); + + if store.intersection(&active).count() == 0 { + let msg = "no authority public key found in store".to_string(); + debug!(target: LOG_TARGET, "🥩 for block {:?} {}", block, msg); + Err(Error::Keystore(msg)) + } else { + Ok(()) + } +} + #[cfg(test)] pub(crate) mod tests { use super::*; use crate::{ - communication::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, + communication::{ + gossip::GossipValidator, + notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, + request_response::outgoing_requests_engine::OnDemandJustificationsEngine, + }, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, BeefyPeer, BeefyTestNet, TestApi, @@ -1274,6 +1085,7 @@ pub(crate) mod tests { use futures::{future::poll_fn, task::Poll}; use parking_lot::Mutex; use sc_client_api::{Backend as BackendT, HeaderBackend}; + use sc_network_gossip::GossipEngine; use sc_network_sync::SyncingService; use sc_network_test::TestNetFactory; use sp_blockchain::Backend as BlockchainBackendT; @@ -1282,7 +1094,7 @@ pub(crate) mod tests { known_payloads::MMR_ROOT_ID, mmr::MmrRootProvider, test_utils::{generate_equivocation_proof, Keyring}, - Payload, SignedCommitment, + ConsensusLog, Payload, SignedCommitment, }; use sp_runtime::traits::{Header as HeaderT, One}; use substrate_test_runtime_client::{ @@ -1291,10 +1103,6 @@ pub(crate) mod tests { }; impl PersistedState { - pub fn voting_oracle(&self) -> &VoterOracle { - &self.voting_oracle - } - pub fn active_round(&self) -> Result<&Rounds, Error> { self.voting_oracle.active_rounds() } @@ -1390,13 +1198,10 @@ pub(crate) mod tests { on_demand_justifications, }; BeefyWorker { - base: BeefyWorkerBase { - backend, - runtime: api, - key_store: Some(keystore).into(), - metrics, - _phantom: Default::default(), - }, + backend, + runtime: api, + key_store: Some(keystore).into(), + metrics, payload_provider, sync: Arc::new(sync), links, @@ -1674,19 +1479,22 @@ pub(crate) mod tests { let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); // keystore doesn't contain other keys than validators' - assert_eq!(worker.base.verify_validator_set(&1, &validator_set), Ok(())); + assert_eq!(verify_validator_set::(&1, &validator_set, &worker.key_store), Ok(())); // unknown `Bob` key let keys = &[Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let err_msg = "no authority public key found in store".to_string(); let expected = Err(Error::Keystore(err_msg)); - assert_eq!(worker.base.verify_validator_set(&1, &validator_set), expected); + assert_eq!(verify_validator_set::(&1, &validator_set, &worker.key_store), expected); // worker has no keystore - worker.base.key_store = None.into(); + worker.key_store = None.into(); let expected_err = Err(Error::Keystore("no Keystore".into())); - assert_eq!(worker.base.verify_validator_set(&1, &validator_set), expected_err); + assert_eq!( + verify_validator_set::(&1, &validator_set, &worker.key_store), + expected_err + ); } #[tokio::test] @@ -1838,7 +1646,7 @@ pub(crate) mod tests { let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); - worker.base.runtime = 
api_alice.clone(); + worker.runtime = api_alice.clone(); // let there be a block with num = 1: let _ = net.peer(0).push_blocks(1, false); diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index 496e6c82740c8904d023fef0a3f224433bbd3434..7f36dfc09ef8451927eb32ebd651258647abee3b 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -23,8 +23,8 @@ libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = { workspace = true, default-features = true } mockall = "0.11.3" parking_lot = "0.12.1" -serde = { version = "1.0", features = ["derive"] } -thiserror = "1.0.48" +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } sc-utils = { path = "../../utils" } diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index e342377d820adba8e1e0f8d1b7942d40b7d404d3..1ab953525220b82a500e5a23be0b94a673be51a2 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -28,8 +28,8 @@ log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", features = ["derive"] } parking_lot = "0.12.1" rand = "0.8.5" -serde_json = "1.0.113" -thiserror = "1.0" +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } fork-tree = { path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-block-builder = { path = "../../block-builder" } @@ -57,7 +57,7 @@ sp-runtime = { path = "../../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } -serde = "1.0.196" +serde = { workspace = true, default-features = true } tokio = "1.22.0" sc-network = { path = "../../network" } sc-network-test = { path = "../../network/test" } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index ca0c2ac371af9b3a58fe6fe39d6357b0abfda342..f7e87415448e8c44952b957cf365fb36b089e4b0 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -18,8 +18,8 @@ futures = "0.3.16" jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"] } -thiserror = "1.0" +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } sc-client-api = { path = "../../../api" } sc-consensus-grandpa = { path = ".." 
} sc-rpc = { path = "../../../rpc" } diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 51745c996aa9955ed67197f81b8de4ddd8095aaf..ac32fed72289fd6a296de2720a75fb36c65ad1f8 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -23,8 +23,8 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" futures-timer = "3.0.1" log = { workspace = true, default-features = true } -serde = { version = "1.0", features = ["derive"] } -thiserror = "1.0" +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } sc-consensus = { path = "../common" } diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index e3608f6716c2631e5079c08e59ef991903b12d73..c3d360f071974ab38c6f1d112c23d6705f937bd3 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -429,7 +429,9 @@ mod tests { sender, } }); - let future = run_manual_seal(ManualSealParams { + + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), env, client: client.clone(), @@ -438,12 +440,8 @@ mod tests { select_chain, create_inherent_data_providers: |_, _| async { Ok(()) }, consensus_data_provider: None, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); + })); + // submit a transaction to pool. let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported @@ -469,7 +467,10 @@ mod tests { assert_eq!(client.header(created_block.hash).unwrap().unwrap().number, 1) } - #[tokio::test] + // TODO: enable once the flakiness is fixed + // See https://github.com/paritytech/polkadot-sdk/issues/3603 + //#[tokio::test] + #[allow(unused)] async fn instant_seal_delayed_finalize() { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); @@ -507,7 +508,8 @@ mod tests { } }); - let future_instant_seal = run_manual_seal(ManualSealParams { + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), commands_stream, env, @@ -516,24 +518,16 @@ mod tests { select_chain, create_inherent_data_providers: |_, _| async { Ok(()) }, consensus_data_provider: None, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future_instant_seal); - }); + })); let delay_sec = 5; - let future_delayed_finalize = run_delayed_finalize(DelayedFinalizeParams { + + // spawn the background finality task + tokio::spawn(run_delayed_finalize(DelayedFinalizeParams { client: client.clone(), delay_sec, spawn_handle: spawner, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future_delayed_finalize); - }); + })); let mut finality_stream = client.finality_notification_stream(); // submit a transaction to pool. 
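The remaining manual-seal tests below get the same treatment: instead of building a second Tokio runtime on a helper thread and blocking on it, each background future is handed to `tokio::spawn`, which reuses the runtime that `#[tokio::test]` already provides. A minimal, self-contained sketch of the pattern, with a hypothetical oneshot-channel task standing in for `run_manual_seal` (not a reproduction of the actual test):

```rust
#[tokio::test]
async fn spawns_background_task_on_ambient_runtime() {
    let (tx, rx) = tokio::sync::oneshot::channel();

    // Before: std::thread::spawn(|| Runtime::new().unwrap().block_on(future));
    // After: hand the future to the runtime that #[tokio::test] already set up.
    tokio::spawn(async move {
        let _ = tx.send(42u32);
    });

    assert_eq!(rx.await.unwrap(), 42);
}
```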
@@ -589,7 +583,9 @@ mod tests { // this test checks that blocks are created as soon as an engine command is sent over the // stream. let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal(ManualSealParams { + + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), env, client: client.clone(), @@ -598,12 +594,8 @@ mod tests { select_chain, consensus_data_provider: None, create_inherent_data_providers: |_, _| async { Ok(()) }, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); + })); + // submit a transaction to pool. let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported @@ -675,7 +667,9 @@ mod tests { // this test checks that blocks are created as soon as an engine command is sent over the // stream. let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal(ManualSealParams { + + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), env, client: client.clone(), @@ -684,12 +678,8 @@ mod tests { select_chain, consensus_data_provider: None, create_inherent_data_providers: |_, _| async { Ok(()) }, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); + })); + // submit a transaction to pool. let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported @@ -781,7 +771,9 @@ mod tests { let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal(ManualSealParams { + + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), env, client: client.clone(), @@ -791,11 +783,8 @@ mod tests { // use a provider that pushes some post digest data consensus_data_provider: Some(Box::new(TestDigestProvider { _client: client.clone() })), create_inherent_data_providers: |_, _| async { Ok(()) }, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(future); - }); + })); + let (tx, rx) = futures::channel::oneshot::channel(); sink.send(EngineCommand::SealNewBlock { parent_hash: None, diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index 6caccb9879dd4f8c97438b86c9495268abc0535c..0791514035b43c83492175bcac67e2a6eb6aa9c6 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -22,7 +22,7 @@ futures = "0.3.21" futures-timer = "3.0.1" log = { workspace = true, default-features = true } parking_lot = "0.12.1" -thiserror = "1.0" +thiserror = { workspace = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } sc-consensus = { path = "../common" } diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 870b4150b9df71ff63e5b5be7467b5369caa97c0..0faa90dfc4f925db8776c6914fcf864813b790a5 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -2531,7 +2531,7 @@ impl sc_client_api::backend::Backend for 
Backend { self.storage.state_db.pin(&hash, number.saturated_into::(), hint).map_err( |_| { sp_blockchain::Error::UnknownBlock(format!( - "State already discarded for `{:?}`", + "Unable to pin: state already discarded for `{:?}`", hash )) }, diff --git a/substrate/client/db/src/pinned_blocks_cache.rs b/substrate/client/db/src/pinned_blocks_cache.rs index 46c9287fb19ac1361153dbb65f4f0b353170042f..ac4aad07765cfbaa4d9c66efa4b44730b0b0a04c 100644 --- a/substrate/client/db/src/pinned_blocks_cache.rs +++ b/substrate/client/db/src/pinned_blocks_cache.rs @@ -20,7 +20,7 @@ use schnellru::{Limiter, LruMap}; use sp_runtime::{traits::Block as BlockT, Justifications}; const LOG_TARGET: &str = "db::pin"; -const PINNING_CACHE_SIZE: usize = 1024; +const PINNING_CACHE_SIZE: usize = 2048; /// Entry for pinned blocks cache. struct PinnedBlockCacheEntry { diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index 4e36de3c4f47c9f4f98f28d694ec3f23cbd81add..7fad7e3a2a0ebaf3a8ecde19ff9517e47ff28e75 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -23,6 +23,7 @@ tracing = "0.1.29" codec = { package = "parity-scale-codec", version = "3.6.1" } sc-executor-common = { path = "common" } +sc-executor-polkavm = { path = "polkavm" } sc-executor-wasmtime = { path = "wasmtime" } sp-api = { path = "../../primitives/api" } sp-core = { path = "../../primitives/core" } diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml index 648fb9f0f5040d60d96defb07bc276e6e10cdc23..8ff34c3709a5e486fb9036a638788a532c6f296c 100644 --- a/substrate/client/executor/common/Cargo.toml +++ b/substrate/client/executor/common/Cargo.toml @@ -17,11 +17,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -thiserror = "1.0.48" +thiserror = { workspace = true } wasm-instrument = "0.4" sc-allocator = { path = "../../allocator" } sp-maybe-compressed-blob = { path = "../../../primitives/maybe-compressed-blob" } sp-wasm-interface = { path = "../../../primitives/wasm-interface" } +polkavm = { workspace = true } [features] default = [] diff --git a/substrate/client/executor/common/src/error.rs b/substrate/client/executor/common/src/error.rs index 2a0dc364b4103ebc53ee767300bf25cc7211fe8b..b7c7e2b701984e37d43741ecb3cc2a1efdab9bdc 100644 --- a/substrate/client/executor/common/src/error.rs +++ b/substrate/client/executor/common/src/error.rs @@ -150,6 +150,24 @@ pub enum WasmError { Other(String), } +impl From for WasmError { + fn from(error: polkavm::ProgramParseError) -> Self { + WasmError::Other(error.to_string()) + } +} + +impl From for WasmError { + fn from(error: polkavm::Error) -> Self { + WasmError::Other(error.to_string()) + } +} + +impl From for Error { + fn from(error: polkavm::Error) -> Self { + Error::Other(error.to_string()) + } +} + /// An error message with an attached backtrace. 
#[derive(Debug)] pub struct MessageWithBacktrace { diff --git a/substrate/client/executor/common/src/lib.rs b/substrate/client/executor/common/src/lib.rs index 751801fb30da18e48e05daedbde644ad8699ce10..ff9fab2c8889fbbc9040740cd5f5a49f158d618c 100644 --- a/substrate/client/executor/common/src/lib.rs +++ b/substrate/client/executor/common/src/lib.rs @@ -25,3 +25,7 @@ pub mod error; pub mod runtime_blob; pub mod util; pub mod wasm_runtime; + +pub(crate) fn is_polkavm_enabled() -> bool { + std::env::var_os("SUBSTRATE_ENABLE_POLKAVM").map_or(false, |value| value == "1") +} diff --git a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs index becf9e219b0b7a60f97bb08b4044897b143bd006..d689083b2f85cd7f3fc16f220c1b45a474dd87f5 100644 --- a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -17,23 +17,23 @@ // along with this program. If not, see . use crate::{error::WasmError, wasm_runtime::HeapAllocStrategy}; -use wasm_instrument::{ - export_mutable_globals, - parity_wasm::elements::{ - deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, - Module, Section, - }, +use wasm_instrument::parity_wasm::elements::{ + deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, + Module, Section, }; -/// A bunch of information collected from a WebAssembly module. +/// A program blob containing a Substrate runtime. #[derive(Clone)] -pub struct RuntimeBlob { - raw_module: Module, +pub struct RuntimeBlob(BlobKind); + +#[derive(Clone)] +enum BlobKind { + WebAssembly(Module), + PolkaVM(polkavm::ProgramBlob<'static>), } impl RuntimeBlob { - /// Create `RuntimeBlob` from the given wasm code. Will attempt to decompress the code before - /// deserializing it. + /// Create `RuntimeBlob` from the given WASM or PolkaVM compressed program blob. /// /// See [`sp_maybe_compressed_blob`] for details about decompression. pub fn uncompress_if_needed(wasm_code: &[u8]) -> Result { @@ -43,31 +43,26 @@ impl RuntimeBlob { Self::new(&wasm_code) } - /// Create `RuntimeBlob` from the given wasm code. + /// Create `RuntimeBlob` from the given WASM or PolkaVM program blob. /// - /// Returns `Err` if the wasm code cannot be deserialized. - pub fn new(wasm_code: &[u8]) -> Result { - let raw_module: Module = deserialize_buffer(wasm_code) - .map_err(|e| WasmError::Other(format!("cannot deserialize module: {:?}", e)))?; - Ok(Self { raw_module }) - } - - /// The number of globals defined in locally in this module. - pub fn declared_globals_count(&self) -> u32 { - self.raw_module - .global_section() - .map(|gs| gs.entries().len() as u32) - .unwrap_or(0) - } - - /// The number of imports of globals. - pub fn imported_globals_count(&self) -> u32 { - self.raw_module.import_section().map(|is| is.globals() as u32).unwrap_or(0) - } + /// Returns `Err` if the blob cannot be deserialized. + /// + /// Will only accept a PolkaVM program if the `SUBSTRATE_ENABLE_POLKAVM` environment + /// variable is set to `1`. 
+ pub fn new(raw_blob: &[u8]) -> Result { + if raw_blob.starts_with(b"PVM\0") { + if crate::is_polkavm_enabled() { + return Ok(Self(BlobKind::PolkaVM( + polkavm::ProgramBlob::parse(raw_blob)?.into_owned(), + ))); + } else { + return Err(WasmError::Other("expected a WASM runtime blob, found a PolkaVM runtime blob; set the 'SUBSTRATE_ENABLE_POLKAVM' environment variable to enable the experimental PolkaVM-based executor".to_string())); + } + } - /// Perform an instrumentation that makes sure that the mutable globals are exported. - pub fn expose_mutable_globals(&mut self) { - export_mutable_globals(&mut self.raw_module, "exported_internal_global"); + let raw_module: Module = deserialize_buffer(raw_blob) + .map_err(|e| WasmError::Other(format!("cannot deserialize module: {:?}", e)))?; + Ok(Self(BlobKind::WebAssembly(raw_module))) } /// Run a pass that instrument this module so as to introduce a deterministic stack height @@ -80,26 +75,16 @@ impl RuntimeBlob { /// /// The stack cost of a function is computed based on how much locals there are and the maximum /// depth of the wasm operand stack. + /// + /// Only valid for WASM programs; will return an error if the blob is a PolkaVM program. pub fn inject_stack_depth_metering(self, stack_depth_limit: u32) -> Result { let injected_module = - wasm_instrument::inject_stack_limiter(self.raw_module, stack_depth_limit).map_err( - |e| WasmError::Other(format!("cannot inject the stack limiter: {:?}", e)), - )?; - - Ok(Self { raw_module: injected_module }) - } + wasm_instrument::inject_stack_limiter(self.into_webassembly_blob()?, stack_depth_limit) + .map_err(|e| { + WasmError::Other(format!("cannot inject the stack limiter: {:?}", e)) + })?; - /// Perform an instrumentation that makes sure that a specific function `entry_point` is - /// exported - pub fn entry_point_exists(&self, entry_point: &str) -> bool { - self.raw_module - .export_section() - .map(|e| { - e.entries().iter().any(|e| { - matches!(e.internal(), Internal::Function(_)) && e.field() == entry_point - }) - }) - .unwrap_or_default() + Ok(Self(BlobKind::WebAssembly(injected_module))) } /// Converts a WASM memory import into a memory section and exports it. @@ -107,8 +92,11 @@ impl RuntimeBlob { /// Does nothing if there's no memory import. /// /// May return an error in case the WASM module is invalid. + /// + /// Only valid for WASM programs; will return an error if the blob is a PolkaVM program. pub fn convert_memory_import_into_export(&mut self) -> Result<(), WasmError> { - let import_section = match self.raw_module.import_section_mut() { + let raw_module = self.as_webassembly_blob_mut()?; + let import_section = match raw_module.import_section_mut() { Some(import_section) => import_section, None => return Ok(()), }; @@ -124,7 +112,7 @@ impl RuntimeBlob { let memory_name = entry.field().to_owned(); import_entries.remove(index); - self.raw_module + raw_module .insert_section(Section::Memory(MemorySection::with_entries(vec![memory_ty]))) .map_err(|error| { WasmError::Other(format!( @@ -133,14 +121,14 @@ impl RuntimeBlob { )) })?; - if self.raw_module.export_section_mut().is_none() { + if raw_module.export_section_mut().is_none() { // A module without an export section is somewhat unrealistic, but let's do this // just in case to cover all of our bases. 
- self.raw_module + raw_module .insert_section(Section::Export(Default::default())) .expect("an export section can be always inserted if it doesn't exist; qed"); } - self.raw_module + raw_module .export_section_mut() .expect("export section already existed or we just added it above, so it always exists; qed") .entries_mut() @@ -156,12 +144,14 @@ impl RuntimeBlob { /// /// Will return an error in case there is no memory section present, /// or if the memory section is empty. + /// + /// Only valid for WASM programs; will return an error if the blob is a PolkaVM program. pub fn setup_memory_according_to_heap_alloc_strategy( &mut self, heap_alloc_strategy: HeapAllocStrategy, ) -> Result<(), WasmError> { - let memory_section = self - .raw_module + let raw_module = self.as_webassembly_blob_mut()?; + let memory_section = raw_module .memory_section_mut() .ok_or_else(|| WasmError::Other("no memory section found".into()))?; @@ -187,8 +177,11 @@ impl RuntimeBlob { /// Scans the wasm blob for the first section with the name that matches the given. Returns the /// contents of the custom section if found or `None` otherwise. + /// + /// Only valid for WASM programs; will return an error if the blob is a PolkaVM program. pub fn custom_section_contents(&self, section_name: &str) -> Option<&[u8]> { - self.raw_module + self.as_webassembly_blob() + .ok()? .custom_sections() .find(|cs| cs.name() == section_name) .map(|cs| cs.payload()) @@ -196,11 +189,45 @@ impl RuntimeBlob { /// Consumes this runtime blob and serializes it. pub fn serialize(self) -> Vec { - serialize(self.raw_module).expect("serializing into a vec should succeed; qed") + match self.0 { + BlobKind::WebAssembly(raw_module) => + serialize(raw_module).expect("serializing into a vec should succeed; qed"), + BlobKind::PolkaVM(ref blob) => blob.as_bytes().to_vec(), + } + } + + fn as_webassembly_blob(&self) -> Result<&Module, WasmError> { + match self.0 { + BlobKind::WebAssembly(ref raw_module) => Ok(raw_module), + BlobKind::PolkaVM(..) => Err(WasmError::Other( + "expected a WebAssembly program; found a PolkaVM program blob".into(), + )), + } } - /// Destructure this structure into the underlying parity-wasm Module. - pub fn into_inner(self) -> Module { - self.raw_module + fn as_webassembly_blob_mut(&mut self) -> Result<&mut Module, WasmError> { + match self.0 { + BlobKind::WebAssembly(ref mut raw_module) => Ok(raw_module), + BlobKind::PolkaVM(..) => Err(WasmError::Other( + "expected a WebAssembly program; found a PolkaVM program blob".into(), + )), + } + } + + fn into_webassembly_blob(self) -> Result { + match self.0 { + BlobKind::WebAssembly(raw_module) => Ok(raw_module), + BlobKind::PolkaVM(..) => Err(WasmError::Other( + "expected a WebAssembly program; found a PolkaVM program blob".into(), + )), + } + } + + /// Gets a reference to the inner PolkaVM program blob, if this is a PolkaVM program. + pub fn as_polkavm_blob(&self) -> Option<&polkavm::ProgramBlob> { + match self.0 { + BlobKind::WebAssembly(..) => None, + BlobKind::PolkaVM(ref blob) => Some(blob), + } } } diff --git a/substrate/client/executor/common/src/wasm_runtime.rs b/substrate/client/executor/common/src/wasm_runtime.rs index d8e142b9d55905e3e31d2247f5a48e8e2b0f987a..e8f429a3dbb2a0451769d427a81bdf5eaef492ff 100644 --- a/substrate/client/executor/common/src/wasm_runtime.rs +++ b/substrate/client/executor/common/src/wasm_runtime.rs @@ -19,7 +19,6 @@ //! Definitions for a wasm runtime. 
use crate::error::Error; -use sp_wasm_interface::Value; pub use sc_allocator::AllocationStats; @@ -30,46 +29,6 @@ pub const DEFAULT_HEAP_ALLOC_STRATEGY: HeapAllocStrategy = /// Default heap allocation pages. pub const DEFAULT_HEAP_ALLOC_PAGES: u32 = 2048; -/// A method to be used to find the entrypoint when calling into the runtime -/// -/// Contains variants on how to resolve wasm function that will be invoked. -pub enum InvokeMethod<'a> { - /// Call function exported with this name. - /// - /// Located function should have (u32, u32) -> u64 signature. - Export(&'a str), - /// Call a function found in the exported table found under the given index. - /// - /// Located function should have (u32, u32) -> u64 signature. - Table(u32), - /// Call function by reference from table through a wrapper. - /// - /// Invoked function (`dispatcher_ref`) function - /// should have (u32, u32, u32) -> u64 signature. - /// - /// `func` will be passed to the invoked function as a first argument. - TableWithWrapper { - /// Wrapper for the call. - /// - /// Function pointer, index into runtime exported table. - dispatcher_ref: u32, - /// Extra argument for dispatch. - /// - /// Common usage would be to use it as an actual wasm function pointer - /// that should be invoked, but can be used as any extra argument on the - /// callee side. - /// - /// This is typically generated and invoked by the runtime itself. - func: u32, - }, -} - -impl<'a> From<&'a str> for InvokeMethod<'a> { - fn from(val: &'a str) -> InvokeMethod<'a> { - InvokeMethod::Export(val) - } -} - /// A trait that defines an abstract WASM runtime module. /// /// This can be implemented by an execution engine. @@ -87,7 +46,7 @@ pub trait WasmInstance: Send { /// Before execution, instance is reset. /// /// Returns the encoded result on success. - fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error> { + fn call(&mut self, method: &str, data: &[u8]) -> Result, Error> { self.call_with_allocation_stats(method, data).0 } @@ -98,7 +57,7 @@ pub trait WasmInstance: Send { /// Returns the encoded result on success. fn call_with_allocation_stats( &mut self, - method: InvokeMethod, + method: &str, data: &[u8], ) -> (Result, Error>, Option); @@ -110,11 +69,6 @@ pub trait WasmInstance: Send { fn call_export(&mut self, method: &str, data: &[u8]) -> Result, Error> { self.call(method.into(), data) } - - /// Get the value from a global with the given `name`. - /// - /// This method is only suitable for getting immutable globals. - fn get_global_const(&mut self, name: &str) -> Result, Error>; } /// Defines the heap pages allocation strategy the wasm runtime should use. 
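With `InvokeMethod` and `get_global_const` gone, `WasmInstance::call` addresses an export purely by name, and the `RuntimeBlob` changes shown earlier decide between WebAssembly and PolkaVM when the blob is parsed. A small sketch of that decision, assuming the `RuntimeBlob` API from this diff (`runtime_kind` is an illustrative helper name, not part of the change):

```rust
use sc_executor_common::{error::WasmError, runtime_blob::RuntimeBlob};

// Classify a raw runtime blob. Per the change above, `RuntimeBlob::new`
// accepts a PolkaVM blob only if it starts with the magic bytes b"PVM\0"
// *and* SUBSTRATE_ENABLE_POLKAVM=1 is set in the environment; any other
// input is parsed as a WebAssembly module.
fn runtime_kind(raw_blob: &[u8]) -> Result<&'static str, WasmError> {
    let blob = RuntimeBlob::new(raw_blob)?;
    Ok(if blob.as_polkavm_blob().is_some() { "PolkaVM" } else { "WebAssembly" })
}
```

`create_wasm_runtime_with_code` (further below) performs the same check via `as_polkavm_blob()` to route PolkaVM blobs to the new `sc-executor-polkavm` crate.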
diff --git a/substrate/client/executor/polkavm/Cargo.toml b/substrate/client/executor/polkavm/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9d0eb8ccf0ee072c86195068a589309edd132ba4 --- /dev/null +++ b/substrate/client/executor/polkavm/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "sc-executor-polkavm" +version = "0.29.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "PolkaVM executor for Substrate" +readme = "README.md" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +log = { workspace = true } +polkavm = { workspace = true } + +sc-executor-common = { path = "../common" } +sp-wasm-interface = { path = "../../../primitives/wasm-interface" } diff --git a/substrate/client/executor/polkavm/README.md b/substrate/client/executor/polkavm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..64fc2fa0c28485566fc60329ad8cfc0057112bc7 --- /dev/null +++ b/substrate/client/executor/polkavm/README.md @@ -0,0 +1 @@ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/substrate/client/executor/polkavm/src/lib.rs b/substrate/client/executor/polkavm/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..1bd72eb33d309997832de5fca9250ca651090ce7 --- /dev/null +++ b/substrate/client/executor/polkavm/src/lib.rs @@ -0,0 +1,261 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use polkavm::{Caller, Reg}; +use sc_executor_common::{ + error::{Error, WasmError}, + wasm_runtime::{AllocationStats, WasmInstance, WasmModule}, +}; +use sp_wasm_interface::{ + Function, FunctionContext, HostFunctions, Pointer, Value, ValueType, WordSize, +}; + +#[repr(transparent)] +pub struct InstancePre(polkavm::InstancePre<()>); + +#[repr(transparent)] +pub struct Instance(polkavm::Instance<()>); + +impl WasmModule for InstancePre { + fn new_instance(&self) -> Result, Error> { + Ok(Box::new(Instance(self.0.instantiate()?))) + } +} + +impl WasmInstance for Instance { + fn call_with_allocation_stats( + &mut self, + name: &str, + raw_data: &[u8], + ) -> (Result, Error>, Option) { + let Some(method_index) = self.0.module().lookup_export(name) else { + return ( + Err(format!("cannot call into the runtime: export not found: '{name}'").into()), + None, + ); + }; + + let Ok(raw_data_length) = u32::try_from(raw_data.len()) else { + return ( + Err(format!("cannot call runtime method '{name}': input payload is too big").into()), + None, + ); + }; + + // TODO: This will leak guest memory; find a better solution. + let mut state_args = polkavm::StateArgs::new(); + + // Make sure the memory is cleared... 
+ state_args.reset_memory(true); + // ...and allocate space for the input payload. + state_args.sbrk(raw_data_length); + + match self.0.update_state(state_args) { + Ok(()) => {}, + Err(polkavm::ExecutionError::Trap(trap)) => { + return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {trap}").into()), None); + }, + Err(polkavm::ExecutionError::Error(error)) => { + return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {error}").into()), None); + }, + Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), + } + + // Grab the address of where the guest's heap starts; that's where we've just allocated + // the memory for the input payload. + let data_pointer = self.0.module().memory_map().heap_base(); + + if let Err(error) = self.0.write_memory(data_pointer, raw_data) { + return (Err(format!("call into the runtime method '{name}': failed to write the input payload into guest memory: {error}").into()), None); + } + + let mut state = (); + let mut call_args = polkavm::CallArgs::new(&mut state, method_index); + call_args.args_untyped(&[data_pointer, raw_data_length]); + + match self.0.call(Default::default(), call_args) { + Ok(()) => {}, + Err(polkavm::ExecutionError::Trap(trap)) => { + return ( + Err(format!("call into the runtime method '{name}' failed: {trap}").into()), + None, + ); + }, + Err(polkavm::ExecutionError::Error(error)) => { + return ( + Err(format!("call into the runtime method '{name}' failed: {error}").into()), + None, + ); + }, + Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), + } + + let result_pointer = self.0.get_reg(Reg::A0); + let result_length = self.0.get_reg(Reg::A1); + let output = match self.0.read_memory_into_vec(result_pointer, result_length) { + Ok(output) => output, + Err(error) => { + return (Err(format!("call into the runtime method '{name}' failed: failed to read the return payload: {error}").into()), None) + }, + }; + + (Ok(output), None) + } +} + +struct Context<'r, 'a>(&'r mut polkavm::Caller<'a, ()>); + +impl<'r, 'a> FunctionContext for Context<'r, 'a> { + fn read_memory_into( + &self, + address: Pointer, + dest: &mut [u8], + ) -> sp_wasm_interface::Result<()> { + self.0 + .read_memory_into_slice(u32::from(address), dest) + .map_err(|error| error.to_string()) + .map(|_| ()) + } + + fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { + self.0.write_memory(u32::from(address), data).map_err(|error| error.to_string()) + } + + fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { + let pointer = self.0.sbrk(0).expect("fetching the current heap pointer never fails"); + + // TODO: This will leak guest memory; find a better solution. + self.0.sbrk(size).ok_or_else(|| String::from("allocation failed"))?; + + Ok(Pointer::new(pointer)) + } + + fn deallocate_memory(&mut self, _ptr: Pointer) -> sp_wasm_interface::Result<()> { + // This is only used by the allocator host function, which is unused under PolkaVM. 
+ unimplemented!("'deallocate_memory' is never used when running under PolkaVM"); + } + + fn register_panic_error_message(&mut self, _message: &str) { + unimplemented!("'register_panic_error_message' is never used when running under PolkaVM"); + } +} + +fn call_host_function( + caller: &mut Caller<()>, + function: &dyn Function, +) -> Result<(), polkavm::Trap> { + let mut args = [Value::I64(0); Reg::ARG_REGS.len()]; + let mut nth_reg = 0; + for (nth_arg, kind) in function.signature().args.iter().enumerate() { + match kind { + ValueType::I32 => { + args[nth_arg] = Value::I32(caller.get_reg(Reg::ARG_REGS[nth_reg]) as i32); + nth_reg += 1; + }, + ValueType::F32 => { + args[nth_arg] = Value::F32(caller.get_reg(Reg::ARG_REGS[nth_reg])); + nth_reg += 1; + }, + ValueType::I64 => { + let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = + Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64); + }, + ValueType::F64 => { + let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32)); + }, + } + } + + log::trace!( + "Calling host function: '{}', args = {:?}", + function.name(), + &args[..function.signature().args.len()] + ); + + let value = match function + .execute(&mut Context(caller), &mut args.into_iter().take(function.signature().args.len())) + { + Ok(value) => value, + Err(error) => { + log::warn!("Call into the host function '{}' failed: {error}", function.name()); + return Err(polkavm::Trap::default()); + }, + }; + + if let Some(value) = value { + match value { + Value::I32(value) => { + caller.set_reg(Reg::A0, value as u32); + }, + Value::F32(value) => { + caller.set_reg(Reg::A0, value); + }, + Value::I64(value) => { + caller.set_reg(Reg::A0, value as u32); + caller.set_reg(Reg::A1, (value >> 32) as u32); + }, + Value::F64(value) => { + caller.set_reg(Reg::A0, value as u32); + caller.set_reg(Reg::A1, (value >> 32) as u32); + }, + } + } + + Ok(()) +} + +pub fn create_runtime(blob: &polkavm::ProgramBlob) -> Result, WasmError> +where + H: HostFunctions, +{ + static ENGINE: std::sync::OnceLock> = + std::sync::OnceLock::new(); + + let engine = ENGINE.get_or_init(|| { + let config = polkavm::Config::from_env()?; + polkavm::Engine::new(&config) + }); + + let engine = match engine { + Ok(ref engine) => engine, + Err(ref error) => { + return Err(WasmError::Other(error.to_string())); + }, + }; + + let module = polkavm::Module::from_blob(&engine, &polkavm::ModuleConfig::default(), blob)?; + let mut linker = polkavm::Linker::new(&engine); + for function in H::host_functions() { + linker.func_new(function.name(), |mut caller| call_host_function(&mut caller, function))?; + } + + let instance_pre = linker.instantiate_pre(&module)?; + Ok(Box::new(InstancePre(instance_pre))) +} diff --git a/substrate/client/executor/src/executor.rs b/substrate/client/executor/src/executor.rs index 499bb704b16990de49d2b3311c48fa8ee2c1813e..d56a3b389ef42868d0543303c5fbd63ea8d2d884 100644 --- a/substrate/client/executor/src/executor.rs +++ b/substrate/client/executor/src/executor.rs @@ -518,7 +518,7 @@ where runtime_code, ext, heap_alloc_strategy, - |_, mut instance, _onchain_version, mut ext| { + |_, mut instance, _on_chain_version, mut ext| { with_externalities_safe(&mut **ext, move || instance.call_export(method, data)) }, ); @@ 
-682,18 +682,18 @@ impl CodeExecutor for NativeElseWasmExecut runtime_code, ext, heap_alloc_strategy, - |_, mut instance, onchain_version, mut ext| { - let onchain_version = - onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; + |_, mut instance, on_chain_version, mut ext| { + let on_chain_version = + on_chain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; let can_call_with = - onchain_version.can_call_with(&self.native_version.runtime_version); + on_chain_version.can_call_with(&self.native_version.runtime_version); if use_native && can_call_with { tracing::trace!( target: "executor", native = %self.native_version.runtime_version, - chain = %onchain_version, + chain = %on_chain_version, "Request for native execution succeeded", ); @@ -705,7 +705,7 @@ impl CodeExecutor for NativeElseWasmExecut tracing::trace!( target: "executor", native = %self.native_version.runtime_version, - chain = %onchain_version, + chain = %on_chain_version, "Request for native execution failed", ); } diff --git a/substrate/client/executor/src/wasm_runtime.rs b/substrate/client/executor/src/wasm_runtime.rs index 501279a312cc09bdaada09b64895a1e6c5a2c87a..be8344ba79b7d78f8d49880ea73754788f33ff3e 100644 --- a/substrate/client/executor/src/wasm_runtime.rs +++ b/substrate/client/executor/src/wasm_runtime.rs @@ -297,6 +297,10 @@ pub fn create_wasm_runtime_with_code( where H: HostFunctions, { + if let Some(blob) = blob.as_polkavm_blob() { + return sc_executor_polkavm::create_runtime::(blob); + } + match wasm_method { WasmExecutionMethod::Compiled { instantiation_strategy } => sc_executor_wasmtime::create_runtime::( diff --git a/substrate/client/executor/wasmtime/src/instance_wrapper.rs b/substrate/client/executor/wasmtime/src/instance_wrapper.rs index 8852532adbcaa40293f2d7dfd0a326572d3c1ca4..4f67d1df9c5f3193d508b408c48c5c1230de2b54 100644 --- a/substrate/client/executor/wasmtime/src/instance_wrapper.rs +++ b/substrate/client/executor/wasmtime/src/instance_wrapper.rs @@ -22,36 +22,12 @@ use std::sync::Arc; use crate::runtime::{InstanceCounter, ReleaseInstanceHandle, Store, StoreData}; -use sc_executor_common::{ - error::{Backtrace, Error, MessageWithBacktrace, Result, WasmError}, - wasm_runtime::InvokeMethod, -}; -use sp_wasm_interface::{Pointer, Value, WordSize}; -use wasmtime::{ - AsContext, AsContextMut, Engine, Extern, Instance, InstancePre, Memory, Table, Val, -}; - -/// Invoked entrypoint format. -pub enum EntryPointType { - /// Direct call. - /// - /// Call is made by providing only payload reference and length. - Direct { entrypoint: wasmtime::TypedFunc<(u32, u32), u64> }, - /// Indirect call. - /// - /// Call is made by providing payload reference and length, and extra argument - /// for advanced routing. - Wrapped { - /// The extra argument passed to the runtime. It is typically a wasm function pointer. - func: u32, - dispatcher: wasmtime::TypedFunc<(u32, u32, u32), u64>, - }, -} +use sc_executor_common::error::{Backtrace, Error, MessageWithBacktrace, Result, WasmError}; +use sp_wasm_interface::{Pointer, WordSize}; +use wasmtime::{AsContext, AsContextMut, Engine, Instance, InstancePre, Memory}; /// Wasm blob entry point. -pub struct EntryPoint { - call_type: EntryPointType, -} +pub struct EntryPoint(wasmtime::TypedFunc<(u32, u32), u64>); impl EntryPoint { /// Call this entry point. 
@@ -64,13 +40,7 @@ impl EntryPoint { let data_ptr = u32::from(data_ptr); let data_len = u32::from(data_len); - match self.call_type { - EntryPointType::Direct { ref entrypoint } => - entrypoint.call(&mut *store, (data_ptr, data_len)), - EntryPointType::Wrapped { func, ref dispatcher } => - dispatcher.call(&mut *store, (func, data_ptr, data_len)), - } - .map_err(|trap| { + self.0.call(&mut *store, (data_ptr, data_len)).map_err(|trap| { let host_state = store .data_mut() .host_state @@ -99,18 +69,7 @@ impl EntryPoint { let entrypoint = func .typed::<(u32, u32), u64>(ctx) .map_err(|_| "Invalid signature for direct entry point")?; - Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) - } - - pub fn wrapped( - dispatcher: wasmtime::Func, - func: u32, - ctx: impl AsContext, - ) -> std::result::Result { - let dispatcher = dispatcher - .typed::<(u32, u32, u32), u64>(ctx) - .map_err(|_| "Invalid signature for wrapped entry point")?; - Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) + Ok(Self(entrypoint)) } } @@ -178,10 +137,8 @@ impl InstanceWrapper { })?; let memory = get_linear_memory(&instance, &mut store)?; - let table = get_table(&instance, &mut store); store.data_mut().memory = Some(memory); - store.data_mut().table = table; Ok(InstanceWrapper { instance, store, _release_instance_handle }) } @@ -190,61 +147,17 @@ impl InstanceWrapper { /// /// An entrypoint must have a signature `(i32, i32) -> i64`, otherwise this function will return /// an error. - pub fn resolve_entrypoint(&mut self, method: InvokeMethod) -> Result { - Ok(match method { - InvokeMethod::Export(method) => { - // Resolve the requested method and verify that it has a proper signature. - let export = - self.instance.get_export(&mut self.store, method).ok_or_else(|| { - Error::from(format!("Exported method {} is not found", method)) - })?; - let func = export - .into_func() - .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))?; - EntryPoint::direct(func, &self.store).map_err(|_| { - Error::from(format!("Exported function '{}' has invalid signature.", method)) - })? - }, - InvokeMethod::Table(func_ref) => { - let table = self - .instance - .get_table(&mut self.store, "__indirect_function_table") - .ok_or(Error::NoTable)?; - let val = table - .get(&mut self.store, func_ref) - .ok_or(Error::NoTableEntryWithIndex(func_ref))?; - let func = val - .funcref() - .ok_or(Error::TableElementIsNotAFunction(func_ref))? - .ok_or(Error::FunctionRefIsNull(func_ref))?; - - EntryPoint::direct(*func, &self.store).map_err(|_| { - Error::from(format!( - "Function @{} in exported table has invalid signature for direct call.", - func_ref, - )) - })? - }, - InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let table = self - .instance - .get_table(&mut self.store, "__indirect_function_table") - .ok_or(Error::NoTable)?; - let val = table - .get(&mut self.store, dispatcher_ref) - .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; - let dispatcher = val - .funcref() - .ok_or(Error::TableElementIsNotAFunction(dispatcher_ref))? - .ok_or(Error::FunctionRefIsNull(dispatcher_ref))?; - - EntryPoint::wrapped(*dispatcher, func, &self.store).map_err(|_| { - Error::from(format!( - "Function @{} in exported table has invalid signature for wrapped call.", - dispatcher_ref, - )) - })? - }, + pub fn resolve_entrypoint(&mut self, method: &str) -> Result { + // Resolve the requested method and verify that it has a proper signature. 
+ let export = self + .instance + .get_export(&mut self.store, method) + .ok_or_else(|| Error::from(format!("Exported method {} is not found", method)))?; + let func = export + .into_func() + .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))?; + EntryPoint::direct(func, &self.store).map_err(|_| { + Error::from(format!("Exported function '{}' has invalid signature.", method)) }) } @@ -268,24 +181,6 @@ impl InstanceWrapper { Ok(heap_base as u32) } - - /// Get the value from a global with the given `name`. - pub fn get_global_val(&mut self, name: &str) -> Result> { - let global = match self.instance.get_export(&mut self.store, name) { - Some(global) => global, - None => return Ok(None), - }; - - let global = global.into_global().ok_or_else(|| format!("`{}` is not a global", name))?; - - match global.get(&mut self.store) { - Val::I32(val) => Ok(Some(Value::I32(val))), - Val::I64(val) => Ok(Some(Value::I64(val))), - Val::F32(val) => Ok(Some(Value::F32(val))), - Val::F64(val) => Ok(Some(Value::F64(val))), - _ => Err("Unknown value type".into()), - } - } } /// Extract linear memory instance from the given instance. @@ -301,15 +196,6 @@ fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result Option
{ - instance - .get_export(ctx, "__indirect_function_table") - .as_ref() - .cloned() - .and_then(Extern::into_table) -} - /// Functions related to memory. impl InstanceWrapper { pub(crate) fn store(&self) -> &Store { diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index ac88663f4e7915e49262ee8186bfaf7d92f6064d..595cc503272ebea7d249262c66b206cab5697d36 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -30,10 +30,10 @@ use sc_executor_common::{ error::{Error, Result, WasmError}, runtime_blob::RuntimeBlob, util::checked_range, - wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmInstance, WasmModule}, + wasm_runtime::{HeapAllocStrategy, WasmInstance, WasmModule}, }; use sp_runtime_interface::unpack_ptr_and_len; -use sp_wasm_interface::{HostFunctions, Pointer, Value, WordSize}; +use sp_wasm_interface::{HostFunctions, Pointer, WordSize}; use std::{ path::{Path, PathBuf}, sync::{ @@ -41,7 +41,7 @@ use std::{ Arc, }, }; -use wasmtime::{AsContext, Engine, Memory, Table}; +use wasmtime::{AsContext, Engine, Memory}; const MAX_INSTANCE_COUNT: u32 = 64; @@ -51,8 +51,6 @@ pub(crate) struct StoreData { pub(crate) host_state: Option, /// This will be always set once the store is initialized. pub(crate) memory: Option, - /// This will be set only if the runtime actually contains a table. - pub(crate) table: Option
, } impl StoreData { @@ -164,7 +162,7 @@ pub struct WasmtimeInstance { impl WasmtimeInstance { fn call_impl( &mut self, - method: InvokeMethod, + method: &str, data: &[u8], allocation_stats: &mut Option, ) -> Result> { @@ -184,20 +182,13 @@ impl WasmtimeInstance { impl WasmInstance for WasmtimeInstance { fn call_with_allocation_stats( &mut self, - method: InvokeMethod, + method: &str, data: &[u8], ) -> (Result>, Option) { let mut allocation_stats = None; let result = self.call_impl(method, data, &mut allocation_stats); (result, allocation_stats) } - - fn get_global_const(&mut self, name: &str) -> Result> { - match &mut self.strategy { - Strategy::RecreateInstance(ref mut instance_creator) => - instance_creator.instantiate()?.get_global_val(name), - } - } } /// Prepare a directory structure and a config file to enable wasmtime caching. diff --git a/substrate/client/keystore/Cargo.toml b/substrate/client/keystore/Cargo.toml index f7f3b138ec89376cb5d7dda93f30fd2da5887972..908e0aa8f38ced48facaf99d05c0d7771f03fa26 100644 --- a/substrate/client/keystore/Cargo.toml +++ b/substrate/client/keystore/Cargo.toml @@ -19,8 +19,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" parking_lot = "0.12.1" -serde_json = "1.0.113" -thiserror = "1.0" +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } sp-application-crypto = { path = "../../primitives/application-crypto" } sp-core = { path = "../../primitives/core" } sp-keystore = { path = "../../primitives/keystore" } diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index 901c573b6bd65664e4d80d46d9686e36958adec6..9b391b76ea00b5e48700b7213afb691c149167ab 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } -serde = { version = "1.0.196", features = ["derive"] } +serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core" } @@ -25,4 +25,4 @@ sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-runtime = { path = "../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 8bcd963bd56fc8b4e9a9b8d83242b24d2cf659e9..736184f4668c8c96c660502774da90aedd85b731 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -37,4 +37,4 @@ sp-core = { path = "../../primitives/core" } sp-keystore = { path = "../../primitives/keystore" } sp-mixnet = { path = "../../primitives/mixnet" } sp-runtime = { path = "../../primitives/runtime" } -thiserror = "1.0" +thiserror = { workspace = true } diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index d2ca8ba8c3f2d1b054e987e2747f83d142976dc8..cbf74440dc1a83933d1e994294a7d37609c72331 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -36,10 +36,10 @@ parking_lot = "0.12.1" partial_sort = "0.2.0" pin-project = "1.0.12" rand = "0.8.5" -serde = { version 
= "1.0.196", features = ["derive"] } -serde_json = "1.0.113" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } smallvec = "1.11.0" -thiserror = "1.0" +thiserror = { workspace = true } tokio = { version = "1.22.0", features = ["macros", "sync"] } tokio-stream = "0.1.7" unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] } diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml index 63544b069e5198c7890ccb0e8f0b2359a0b1ddc0..7ef3ea212427848510e2e4b910efbe3d54e01a48 100644 --- a/substrate/client/network/bitswap/Cargo.toml +++ b/substrate/client/network/bitswap/Cargo.toml @@ -25,7 +25,7 @@ futures = "0.3.21" libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = { workspace = true, default-features = true } prost = "0.12" -thiserror = "1.0" +thiserror = { workspace = true } unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] } sc-client-api = { path = "../../api" } sc-network = { path = ".." } diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index a83be7538bef1e7d910747ee917329ff2ea4d2ff..c757f727fb71a7c2261090d741e286d7851d8239 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -33,4 +33,4 @@ sc-client-api = { path = "../../api" } sc-network = { path = ".." } sp-core = { path = "../../../primitives/core" } sp-runtime = { path = "../../../primitives/runtime" } -thiserror = "1.0" +thiserror = { workspace = true } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index 19f86e87ac50fbe095ca888d5f58c5ceaa758473..32ba3b6356c0ceb0dcf38bda7d333ffddf67d238 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -31,7 +31,7 @@ mockall = "0.11.3" prost = "0.12" schnellru = "0.2.1" smallvec = "1.11.0" -thiserror = "1.0" +thiserror = { workspace = true } tokio-stream = "0.1.14" tokio = { version = "1.32.0", features = ["macros", "time"] } fork-tree = { path = "../../../utils/fork-tree" } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index c1a7009eeb01745ca5a7069eb2a258768ad2c49c..24640fdc45576191f30d190a39ee8186d27520c8 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -656,6 +656,8 @@ where Some(event) => self.process_notification_event(event), None => return, }, + // TODO: setting of warp sync target block should be moved to the initialization of + // `SyncingEngine`, see https://github.com/paritytech/polkadot-sdk/issues/3537. 
warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused => { if let Err(_) = self.pass_warp_sync_target_block_header(warp_target_block_header) { error!( diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index e23a23e735d3e25b60b6b5bda8dd4beadcfe9377..9f6c0f45d089ce415f9e8c0ae792de1d7a327407 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -28,7 +28,7 @@ mod justification_requests; mod pending_responses; mod request_metrics; mod schema; -mod types; +pub mod types; pub mod block_relay_protocol; pub mod block_request_handler; diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 7d6e6a8d3b8b79e9e926261d298e4fcacf7c714e..dabcf37ae6321a4501095a4ba3140c1ccf424b74 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -30,7 +30,7 @@ use crate::{ }; use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode}; use libp2p::PeerId; -use log::{debug, error, info}; +use log::{debug, error, info, warn}; use prometheus_endpoint::Registry; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; @@ -159,7 +159,7 @@ impl From> for SyncingAction { /// Proxy to specific syncing strategies. pub struct SyncingStrategy { - /// Syncing configuration. + /// Initial syncing configuration. config: SyncingConfig, /// Client used by syncing strategies. client: Arc, @@ -466,15 +466,27 @@ where &mut self, target_header: B::Header, ) -> Result<(), ()> { - match self.warp { - Some(ref mut warp) => { - warp.set_target_block(target_header); - Ok(()) + match self.config.mode { + SyncMode::Warp => match self.warp { + Some(ref mut warp) => { + warp.set_target_block(target_header); + Ok(()) + }, + None => { + // As mode is set to warp sync, but no warp sync strategy is active, this means + // that warp sync has already finished / was skipped. + warn!( + target: LOG_TARGET, + "Discarding warp sync target, as warp sync was seemingly skipped due \ + to node being (partially) synced.", + ); + Ok(()) + }, }, - None => { + _ => { error!( target: LOG_TARGET, - "Cannot set warp sync target block: no warp sync strategy is active." + "Cannot set warp sync target block: not in warp sync mode." ); debug_assert!(false); Err(()) @@ -587,3 +599,63 @@ where } } } + +#[cfg(test)] +mod test { + use super::*; + use futures::executor::block_on; + use sc_block_builder::BlockBuilderBuilder; + use substrate_test_runtime_client::{ + ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClientBuilder, + TestClientBuilderExt, + }; + + /// Regression test for crash when starting already synced parachain node with `--sync=warp`. + /// We must remove this after setting of warp sync target block is moved to initialization of + /// `SyncingEngine` (issue https://github.com/paritytech/polkadot-sdk/issues/3537). + #[test] + fn set_target_block_finished_warp_sync() { + // Populate database with finalized state. 
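+		// Warp sync finishes immediately when the database already contains a finalized
+		// state, which is exactly the situation this regression test reproduces.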
+ let mut client = Arc::new(TestClientBuilder::new().build()); + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let just = (*b"TEST", Vec::new()); + client.finalize_block(block.hash(), Some(just)).unwrap(); + let target_block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + + // Initialize syncing strategy. + let config = SyncingConfig { + mode: SyncMode::Warp, + max_parallel_downloads: 3, + max_blocks_per_request: 64, + metrics_registry: None, + }; + let mut strategy = + SyncingStrategy::new(config, client, Some(WarpSyncConfig::WaitForTarget)).unwrap(); + + // Warp sync instantly finishes as we have finalized state in DB. + let actions = strategy.actions().unwrap(); + assert_eq!(actions.len(), 1); + assert!(matches!(actions[0], SyncingAction::Finished)); + assert!(strategy.warp.is_none()); + + // Try setting the target block. We mustn't crash. + strategy + .set_warp_sync_target_block_header(target_block.header().clone()) + .unwrap(); + } +} diff --git a/substrate/client/proposer-metrics/src/lib.rs b/substrate/client/proposer-metrics/src/lib.rs index 012e8ca769a96cb51141c5a13ca7ba04d76a8123..2856300cf8027b45bb0ef32b8781c533ae2e340c 100644 --- a/substrate/client/proposer-metrics/src/lib.rs +++ b/substrate/client/proposer-metrics/src/lib.rs @@ -44,11 +44,14 @@ impl MetricsLink { } /// The reason why proposing a block ended. +#[derive(Clone, Copy, PartialEq, Eq)] pub enum EndProposingReason { NoMoreTransactions, HitDeadline, HitBlockSizeLimit, HitBlockWeightLimit, + /// No transactions are allowed in the block. + TransactionForbidden, } /// Authorship metrics. 
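For context, a minimal usage sketch of the new variant (not part of the patch); the `end_reason` helper and its flags are hypothetical names, only `EndProposingReason::TransactionForbidden` itself comes from this change:

```rust
use sc_proposer_metrics::EndProposingReason;

// Hypothetical helper: pick the reason a proposer stopped filling a block.
fn end_reason(runtime_forbids_transactions: bool, pending_is_empty: bool) -> EndProposingReason {
	if runtime_forbids_transactions {
		// The runtime does not allow any transactions in this block.
		EndProposingReason::TransactionForbidden
	} else if pending_is_empty {
		EndProposingReason::NoMoreTransactions
	} else {
		EndProposingReason::HitDeadline
	}
}
```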
@@ -112,6 +115,7 @@ impl Metrics { EndProposingReason::NoMoreTransactions => "no_more_transactions", EndProposingReason::HitBlockSizeLimit => "hit_block_size_limit", EndProposingReason::HitBlockWeightLimit => "hit_block_weight_limit", + EndProposingReason::TransactionForbidden => "transactions_forbidden", }; self.end_proposing_reason.with_label_values(&[reason]).inc(); diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index 27f8a1b9062416fc0836030835e95d8aeada1345..1b7af6a4a52fe650dd325cc38d92daddd9c7a9fa 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"] } -serde_json = "1.0.113" -thiserror = "1.0" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } sc-chain-spec = { path = "../chain-spec" } sc-mixnet = { path = "../mixnet" } sc-transaction-pool-api = { path = "../transaction-pool/api" } diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 3e04b927a222f5c35d7afdefd63263e69fa666b1..3adc81c57d594a355212a88f83682a0441bc7acc 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["server"] } log = { workspace = true, default-features = true } -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } tokio = { version = "1.22.0", features = ["parking_lot"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } tower-http = { version = "0.4.0", features = ["cors"] } @@ -26,5 +26,4 @@ tower = { version = "0.4.13", features = ["util"] } http = "0.2.8" hyper = "0.14.27" futures = "0.3.29" -pin-project = "1.1.3" governor = "0.6.0" diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index a22b7309ac1923cf58e99e0fbe59ec57d04feaf0..ad4b444c7ff425ea84ba25e756d61854c3a4569e 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -47,9 +47,9 @@ pub use jsonrpsee::{ id_providers::{RandomIntegerIdProvider, RandomStringIdProvider}, traits::IdProvider, }, - server::middleware::rpc::RpcServiceBuilder, + server::{middleware::rpc::RpcServiceBuilder, BatchRequestConfig}, }; -pub use middleware::{MetricsLayer, RateLimitLayer, RpcMetrics}; +pub use middleware::{Metrics, MiddlewareLayer, RpcMetrics}; const MEGABYTE: u32 = 1024 * 1024; @@ -81,6 +81,8 @@ pub struct Config<'a, M: Send + Sync + 'static> { pub id_provider: Option>, /// Tokio runtime handle. pub tokio_handle: tokio::runtime::Handle, + /// Batch request config. + pub batch_config: BatchRequestConfig, /// Rate limit calls per minute. 
pub rate_limit: Option, } @@ -103,6 +105,7 @@ where { let Config { addrs, + batch_config, cors, max_payload_in_mb, max_payload_out_mb, @@ -139,6 +142,7 @@ where ) .set_http_middleware(http_middleware) .set_message_buffer_capacity(message_buffer_capacity) + .set_batch_request_config(batch_config) .custom_tokio_runtime(tokio_handle.clone()); if let Some(provider) = id_provider { @@ -169,13 +173,22 @@ where let is_websocket = ws::is_upgrade_request(&req); let transport_label = if is_websocket { "ws" } else { "http" }; - let metrics = metrics.map(|m| MetricsLayer::new(m, transport_label)); - let rate_limit = rate_limit.map(|r| RateLimitLayer::per_minute(r)); + let middleware_layer = match (metrics, rate_limit) { + (None, None) => None, + (Some(metrics), None) => Some( + MiddlewareLayer::new().with_metrics(Metrics::new(metrics, transport_label)), + ), + (None, Some(rate_limit)) => + Some(MiddlewareLayer::new().with_rate_limit_per_minute(rate_limit)), + (Some(metrics), Some(rate_limit)) => Some( + MiddlewareLayer::new() + .with_metrics(Metrics::new(metrics, transport_label)) + .with_rate_limit_per_minute(rate_limit), + ), + }; - // NOTE: The metrics needs to run first to include rate-limited calls in the - // metrics. let rpc_middleware = - RpcServiceBuilder::new().option_layer(metrics.clone()).option_layer(rate_limit); + RpcServiceBuilder::new().option_layer(middleware_layer.clone()); let mut svc = service_builder.set_rpc_middleware(rpc_middleware).build(methods, stop_handle); @@ -187,9 +200,9 @@ where // Spawn a task to handle when the connection is closed. tokio_handle.spawn(async move { let now = std::time::Instant::now(); - metrics.as_ref().map(|m| m.ws_connect()); + middleware_layer.as_ref().map(|m| m.ws_connect()); on_disconnect.await; - metrics.as_ref().map(|m| m.ws_disconnect(now)); + middleware_layer.as_ref().map(|m| m.ws_disconnect(now)); }); } diff --git a/substrate/client/rpc-servers/src/middleware/metrics.rs b/substrate/client/rpc-servers/src/middleware/metrics.rs index c2d1956c3b3985e8e071ad5316dcc2a150410415..17849dc0c44846c10ed06b0f86d39783c5d29dc1 100644 --- a/substrate/client/rpc-servers/src/middleware/metrics.rs +++ b/substrate/client/rpc-servers/src/middleware/metrics.rs @@ -18,15 +18,9 @@ //! RPC middleware to collect prometheus metrics on RPC calls. -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, - time::Instant, -}; +use std::time::Instant; -use jsonrpsee::{server::middleware::rpc::RpcServiceT, types::Request, MethodResponse}; -use pin_project::pin_project; +use jsonrpsee::{types::Request, MethodResponse}; use prometheus_endpoint::{ register, Counter, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, @@ -77,7 +71,7 @@ impl RpcMetrics { "Total time [μs] of processed RPC calls", ) .buckets(HISTOGRAM_BUCKETS.to_vec()), - &["protocol", "method"], + &["protocol", "method", "is_rate_limited"], )?, metrics_registry, )?, @@ -97,7 +91,7 @@ impl RpcMetrics { "substrate_rpc_calls_finished", "Number of processed RPC calls (unique un-batched requests)", ), - &["protocol", "method", "is_error"], + &["protocol", "method", "is_error", "is_rate_limited"], )?, metrics_registry, )?, @@ -144,138 +138,90 @@ impl RpcMetrics { self.ws_sessions_closed.as_ref().map(|counter| counter.inc()); self.ws_sessions_time.with_label_values(&["ws"]).observe(micros as _); } -} - -/// Metrics layer. -#[derive(Clone)] -pub struct MetricsLayer { - inner: RpcMetrics, - transport_label: &'static str, -} - -impl MetricsLayer { - /// Create a new [`MetricsLayer`]. 
-	pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self {
-		Self { inner: metrics, transport_label }
-	}
-
-	pub(crate) fn ws_connect(&self) {
-		self.inner.ws_connect();
-	}
-
-	pub(crate) fn ws_disconnect(&self, now: Instant) {
-		self.inner.ws_disconnect(now)
-	}
-}
-
-impl<S> tower::Layer<S> for MetricsLayer {
-	type Service = Metrics<S>;
-
-	fn layer(&self, inner: S) -> Self::Service {
-		Metrics::new(inner, self.inner.clone(), self.transport_label)
-	}
-}
-
-/// Metrics middleware.
-#[derive(Clone)]
-pub struct Metrics<S> {
-	service: S,
-	metrics: RpcMetrics,
-	transport_label: &'static str,
-}
-
-impl<S> Metrics<S> {
-	/// Create a new metrics middleware.
-	pub fn new(service: S, metrics: RpcMetrics, transport_label: &'static str) -> Metrics<S> {
-		Metrics { service, metrics, transport_label }
-	}
-}
-
-impl<'a, S> RpcServiceT<'a> for Metrics<S>
-where
-	S: Send + Sync + RpcServiceT<'a>,
-{
-	type Future = ResponseFuture<'a, S::Future>;
-
-	fn call(&self, req: Request<'a>) -> Self::Future {
-		let now = Instant::now();
+	pub(crate) fn on_call(&self, req: &Request, transport_label: &'static str) {
 		log::trace!(
 			target: "rpc_metrics",
-			"[{}] on_call name={} params={:?}",
-			self.transport_label,
+			"[{transport_label}] on_call name={} params={:?}",
 			req.method_name(),
 			req.params(),
 		);
-		self.metrics
-			.calls_started
-			.with_label_values(&[self.transport_label, req.method_name()])
+
+		self.calls_started
+			.with_label_values(&[transport_label, req.method_name()])
 			.inc();
+	}
 
-		ResponseFuture {
-			fut: self.service.call(req.clone()),
-			metrics: self.metrics.clone(),
-			req,
-			now,
-			transport_label: self.transport_label,
-		}
+	pub(crate) fn on_response(
+		&self,
+		req: &Request,
+		rp: &MethodResponse,
+		is_rate_limited: bool,
+		transport_label: &'static str,
+		now: Instant,
+	) {
+		log::trace!(target: "rpc_metrics", "[{transport_label}] on_response started_at={:?}", now);
+		log::trace!(target: "rpc_metrics::extra", "[{transport_label}] result={}", rp.as_result());
+
+		let micros = now.elapsed().as_micros();
+		log::debug!(
+			target: "rpc_metrics",
+			"[{transport_label}] {} call took {} μs",
+			req.method_name(),
+			micros,
+		);
+		self.calls_time
+			.with_label_values(&[
+				transport_label,
+				req.method_name(),
+				if is_rate_limited { "true" } else { "false" },
+			])
+			.observe(micros as _);
+		self.calls_finished
+			.with_label_values(&[
+				transport_label,
+				req.method_name(),
+				// The label is "is_error", so `success` should be regarded as false
+				// and vice-versa to be registered correctly.
+				if rp.is_success() { "false" } else { "true" },
+				if is_rate_limited { "true" } else { "false" },
+			])
+			.inc();
 	}
 }
 
-/// Response future for metrics.
-#[pin_project]
-pub struct ResponseFuture<'a, F> {
-	#[pin]
-	fut: F,
-	metrics: RpcMetrics,
-	req: Request<'a>,
-	now: Instant,
-	transport_label: &'static str,
+/// Metrics with transport label.
+#[derive(Clone, Debug)]
+pub struct Metrics {
+	pub(crate) inner: RpcMetrics,
+	pub(crate) transport_label: &'static str,
 }
 
-impl<'a, F> std::fmt::Debug for ResponseFuture<'a, F> {
-	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-		f.write_str("ResponseFuture")
+impl Metrics {
+	/// Create a new [`Metrics`].
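+	/// The `transport_label` is typically `"ws"` or `"http"`.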
+ pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self { + Self { inner: metrics, transport_label } } -} -impl<'a, F: Future> Future for ResponseFuture<'a, F> { - type Output = F::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); + pub(crate) fn ws_connect(&self) { + self.inner.ws_connect(); + } - let res = this.fut.poll(cx); - if let Poll::Ready(rp) = &res { - let method_name = this.req.method_name(); - let transport_label = &this.transport_label; - let now = this.now; - let metrics = &this.metrics; + pub(crate) fn ws_disconnect(&self, now: Instant) { + self.inner.ws_disconnect(now) + } - log::trace!(target: "rpc_metrics", "[{transport_label}] on_response started_at={:?}", now); - log::trace!(target: "rpc_metrics::extra", "[{transport_label}] result={:?}", rp); + pub(crate) fn on_call(&self, req: &Request) { + self.inner.on_call(req, self.transport_label) + } - let micros = now.elapsed().as_micros(); - log::debug!( - target: "rpc_metrics", - "[{transport_label}] {method_name} call took {} μs", - micros, - ); - metrics - .calls_time - .with_label_values(&[transport_label, method_name]) - .observe(micros as _); - metrics - .calls_finished - .with_label_values(&[ - transport_label, - method_name, - // the label "is_error", so `success` should be regarded as false - // and vice-versa to be registrered correctly. - if rp.is_success() { "false" } else { "true" }, - ]) - .inc(); - } - res + pub(crate) fn on_response( + &self, + req: &Request, + rp: &MethodResponse, + is_rate_limited: bool, + now: Instant, + ) { + self.inner.on_response(req, rp, is_rate_limited, self.transport_label, now) } } diff --git a/substrate/client/rpc-servers/src/middleware/mod.rs b/substrate/client/rpc-servers/src/middleware/mod.rs index cac516913d327f46ccf6ba1f81c7fb447873509b..88ed8b2f433580fa2d87f1a70eacc32019027f2a 100644 --- a/substrate/client/rpc-servers/src/middleware/mod.rs +++ b/substrate/client/rpc-servers/src/middleware/mod.rs @@ -18,10 +18,131 @@ //! JSON-RPC specific middleware. -/// Grafana metrics middleware. -pub mod metrics; -/// Rate limit middleware. -pub mod rate_limit; +use std::{ + num::NonZeroU32, + time::{Duration, Instant}, +}; + +use futures::future::{BoxFuture, FutureExt}; +use governor::{clock::Clock, Jitter}; +use jsonrpsee::{ + server::middleware::rpc::RpcServiceT, + types::{ErrorObject, Id, Request}, + MethodResponse, +}; + +mod metrics; +mod rate_limit; pub use metrics::*; pub use rate_limit::*; + +const MAX_JITTER: Duration = Duration::from_millis(50); +const MAX_RETRIES: usize = 10; + +/// JSON-RPC middleware layer. +#[derive(Debug, Clone, Default)] +pub struct MiddlewareLayer { + rate_limit: Option, + metrics: Option, +} + +impl MiddlewareLayer { + /// Create an empty MiddlewareLayer. + pub fn new() -> Self { + Self::default() + } + + /// Enable new rate limit middleware enforced per minute. + pub fn with_rate_limit_per_minute(self, n: NonZeroU32) -> Self { + Self { rate_limit: Some(RateLimit::per_minute(n)), metrics: self.metrics } + } + + /// Enable metrics middleware. + pub fn with_metrics(self, metrics: Metrics) -> Self { + Self { rate_limit: self.rate_limit, metrics: Some(metrics) } + } + + /// Register a new websocket connection. + pub fn ws_connect(&self) { + self.metrics.as_ref().map(|m| m.ws_connect()); + } + + /// Register that a websocket connection was closed. 
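+	/// If metrics are enabled, this records the session duration measured from `now`.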
+ pub fn ws_disconnect(&self, now: Instant) { + self.metrics.as_ref().map(|m| m.ws_disconnect(now)); + } +} + +impl tower::Layer for MiddlewareLayer { + type Service = Middleware; + + fn layer(&self, service: S) -> Self::Service { + Middleware { service, rate_limit: self.rate_limit.clone(), metrics: self.metrics.clone() } + } +} + +/// JSON-RPC middleware that handles metrics +/// and rate-limiting. +/// +/// These are part of the same middleware +/// because the metrics needs to know whether +/// a call was rate-limited or not because +/// it will impact the roundtrip for a call. +pub struct Middleware { + service: S, + rate_limit: Option, + metrics: Option, +} + +impl<'a, S> RpcServiceT<'a> for Middleware +where + S: Send + Sync + RpcServiceT<'a> + Clone + 'static, +{ + type Future = BoxFuture<'a, MethodResponse>; + + fn call(&self, req: Request<'a>) -> Self::Future { + let now = Instant::now(); + + self.metrics.as_ref().map(|m| m.on_call(&req)); + + let service = self.service.clone(); + let rate_limit = self.rate_limit.clone(); + let metrics = self.metrics.clone(); + + async move { + let mut is_rate_limited = false; + + if let Some(limit) = rate_limit.as_ref() { + let mut attempts = 0; + let jitter = Jitter::up_to(MAX_JITTER); + + loop { + if attempts >= MAX_RETRIES { + return reject_too_many_calls(req.id); + } + + if let Err(rejected) = limit.inner.check() { + tokio::time::sleep(jitter + rejected.wait_time_from(limit.clock.now())) + .await; + } else { + break; + } + + is_rate_limited = true; + attempts += 1; + } + } + + let rp = service.call(req.clone()).await; + metrics.as_ref().map(|m| m.on_response(&req, &rp, is_rate_limited, now)); + + rp + } + .boxed() + } +} + +fn reject_too_many_calls(id: Id) -> MethodResponse { + MethodResponse::error(id, ErrorObject::owned(-32999, "RPC rate limit exceeded", None::<()>)) +} diff --git a/substrate/client/rpc-servers/src/middleware/rate_limit.rs b/substrate/client/rpc-servers/src/middleware/rate_limit.rs index cdcc3ebf66f7d0ab9d2ba515b0c128b55281f590..23335eb37686c30d6bf148a2fd38f75013aef606 100644 --- a/substrate/client/rpc-servers/src/middleware/rate_limit.rs +++ b/substrate/client/rpc-servers/src/middleware/rate_limit.rs @@ -16,92 +16,32 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! RPC rate limiting middleware. +//! RPC rate limit. -use std::{num::NonZeroU32, sync::Arc, time::Duration}; - -use futures::future::{BoxFuture, FutureExt}; use governor::{ - clock::{Clock, DefaultClock, QuantaClock}, + clock::{DefaultClock, QuantaClock}, middleware::NoOpMiddleware, state::{InMemoryState, NotKeyed}, - Jitter, -}; -use jsonrpsee::{ - server::middleware::rpc::RpcServiceT, - types::{ErrorObject, Id, Request}, - MethodResponse, + Quota, }; +use std::{num::NonZeroU32, sync::Arc}; type RateLimitInner = governor::RateLimiter; -const MAX_JITTER: Duration = Duration::from_millis(50); -const MAX_RETRIES: usize = 10; - -/// JSON-RPC rate limit middleware layer. +/// Rate limit. #[derive(Debug, Clone)] -pub struct RateLimitLayer(governor::Quota); - -impl RateLimitLayer { - /// Create new rate limit enforced per minute. 
- pub fn per_minute(n: NonZeroU32) -> Self { - Self(governor::Quota::per_minute(n)) - } +pub struct RateLimit { + pub(crate) inner: Arc, + pub(crate) clock: QuantaClock, } -/// JSON-RPC rate limit middleware -pub struct RateLimit { - service: S, - rate_limit: Arc, - clock: QuantaClock, -} - -impl tower::Layer for RateLimitLayer { - type Service = RateLimit; - - fn layer(&self, service: S) -> Self::Service { +impl RateLimit { + /// Create a new `RateLimit` per minute. + pub fn per_minute(n: NonZeroU32) -> Self { let clock = QuantaClock::default(); - RateLimit { - service, - rate_limit: Arc::new(RateLimitInner::direct_with_clock(self.0, &clock)), + Self { + inner: Arc::new(RateLimitInner::direct_with_clock(Quota::per_minute(n), &clock)), clock, } } } - -impl<'a, S> RpcServiceT<'a> for RateLimit -where - S: Send + Sync + RpcServiceT<'a> + Clone + 'static, -{ - type Future = BoxFuture<'a, MethodResponse>; - - fn call(&self, req: Request<'a>) -> Self::Future { - let service = self.service.clone(); - let rate_limit = self.rate_limit.clone(); - let clock = self.clock.clone(); - - async move { - let mut attempts = 0; - let jitter = Jitter::up_to(MAX_JITTER); - - loop { - if attempts >= MAX_RETRIES { - break reject_too_many_calls(req.id); - } - - if let Err(rejected) = rate_limit.check() { - tokio::time::sleep(jitter + rejected.wait_time_from(clock.now())).await; - } else { - break service.call(req).await; - } - - attempts += 1; - } - } - .boxed() - } -} - -fn reject_too_many_calls(id: Id) -> MethodResponse { - MethodResponse::error(id, ErrorObject::owned(-32999, "RPC rate limit exceeded", None::<()>)) -} diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index cb109becaf260150e7523d4c8348689dfe8b9d48..c62b3e789d389047f66004f5f76a6bb947ce8b9e 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -31,8 +31,8 @@ sc-client-api = { path = "../api" } sc-utils = { path = "../utils" } sc-rpc = { path = "../rpc" } codec = { package = "parity-scale-codec", version = "3.6.1" } -thiserror = "1.0" -serde = "1.0" +thiserror = { workspace = true } +serde = { workspace = true, default-features = true } hex = "0.4" futures = "0.3.21" parking_lot = "0.12.1" @@ -44,7 +44,7 @@ futures-util = { version = "0.3.30", default-features = false } rand = "0.8.5" [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } tokio = { version = "1.22.0", features = ["macros"] } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } substrate-test-runtime = { path = "../../test-utils/runtime" } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index e94374aebd912b40074c194f656a1951fb097510..afa99f3aa1648823aee646304c7c2b2cd71da6e2 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -42,7 +42,14 @@ use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, Info, }; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{collections::HashSet, sync::Arc}; +use std::{ + collections::{HashSet, VecDeque}, + sync::Arc, +}; + +/// The maximum number of finalized blocks provided by the +/// `Initialized` event. 
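+///
+/// Older finalized blocks are neither reported in the event nor pinned here.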
+const MAX_FINALIZED_BLOCKS: usize = 16; use super::subscription::InsertedSubscriptionData; @@ -95,6 +102,8 @@ struct InitialBlocks { /// /// It is a tuple of (block hash, parent hash). finalized_block_descendants: Vec<(Block::Hash, Block::Hash)>, + /// Hashes of the last finalized blocks + finalized_block_hashes: VecDeque, /// Blocks that should not be reported as pruned by the `Finalized` event. /// /// Substrate database will perform the pruning of height N at @@ -178,13 +187,14 @@ where } /// Get the in-memory blocks of the client, starting from the provided finalized hash. + /// + /// The reported blocks are pinned by this function. fn get_init_blocks_with_forks( &self, - startup_point: &StartupPoint, + finalized: Block::Hash, ) -> Result, SubscriptionManagementError> { let blockchain = self.backend.blockchain(); let leaves = blockchain.leaves()?; - let finalized = startup_point.finalized_hash; let mut pruned_forks = HashSet::new(); let mut finalized_block_descendants = Vec::new(); let mut unique_descendants = HashSet::new(); @@ -198,17 +208,47 @@ where // Ensure a `NewBlock` event is generated for all children of the // finalized block. Describe the tree route as (child_node, parent_node) // Note: the order of elements matters here. - let parents = std::iter::once(finalized).chain(blocks.clone()); + let mut parent = finalized; + for child in blocks { + let pair = (child, parent); - for pair in blocks.zip(parents) { if unique_descendants.insert(pair) { + // The finalized block is pinned below. + self.sub_handle.pin_block(&self.sub_id, child)?; finalized_block_descendants.push(pair); } + + parent = child; } } } - Ok(InitialBlocks { finalized_block_descendants, pruned_forks }) + let mut current_block = finalized; + // The header of the finalized block must not be pruned. + let Some(header) = blockchain.header(current_block)? else { + return Err(SubscriptionManagementError::BlockHeaderAbsent); + }; + + // Report at most `MAX_FINALIZED_BLOCKS`. Note: The node might not have that many blocks. + let mut finalized_block_hashes = VecDeque::with_capacity(MAX_FINALIZED_BLOCKS); + + // Pin the finalized block. + self.sub_handle.pin_block(&self.sub_id, current_block)?; + finalized_block_hashes.push_front(current_block); + current_block = *header.parent_hash(); + + for _ in 0..MAX_FINALIZED_BLOCKS - 1 { + let Ok(Some(header)) = blockchain.header(current_block) else { break }; + // Block cannot be reported if pinning fails. + if self.sub_handle.pin_block(&self.sub_id, current_block).is_err() { + break + }; + + finalized_block_hashes.push_front(current_block); + current_block = *header.parent_hash(); + } + + Ok(InitialBlocks { finalized_block_descendants, finalized_block_hashes, pruned_forks }) } /// Generate the initial events reported by the RPC `follow` method. @@ -220,18 +260,17 @@ where startup_point: &StartupPoint, ) -> Result<(Vec>, HashSet), SubscriptionManagementError> { - let init = self.get_init_blocks_with_forks(startup_point)?; + let init = self.get_init_blocks_with_forks(startup_point.finalized_hash)?; + // The initialized event is the first one sent. let initial_blocks = init.finalized_block_descendants; + let finalized_block_hashes = init.finalized_block_hashes; - // The initialized event is the first one sent. 
 		let finalized_block_hash = startup_point.finalized_hash;
-		self.sub_handle.pin_block(&self.sub_id, finalized_block_hash)?;
-
 		let finalized_block_runtime = self.generate_runtime_event(finalized_block_hash, None);
 
 		let initialized_event = FollowEvent::Initialized(Initialized {
-			finalized_block_hash,
+			finalized_block_hashes: finalized_block_hashes.into(),
 			finalized_block_runtime,
 			with_runtime: self.with_runtime,
 		});
@@ -240,8 +279,6 @@ where
 		finalized_block_descendants.push(initialized_event);
 
 		for (child, parent) in initial_blocks.into_iter() {
-			self.sub_handle.pin_block(&self.sub_id, child)?;
-
 			let new_runtime = self.generate_runtime_event(child, Some(parent));
 
 			let event = FollowEvent::NewBlock(NewBlock {
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs
index 560ab87eab405968e9b7348bfd37792307b87a8f..e0c804d16eb17853876a8b4bfd38d42d4b506f49 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs
@@ -111,8 +111,8 @@ impl From for RuntimeEvent {
 #[derive(Debug, Clone, PartialEq, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Initialized<Hash> {
-	/// The hash of the latest finalized block.
-	pub finalized_block_hash: Hash,
+	/// The hashes of the latest finalized blocks.
+	pub finalized_block_hashes: Vec<Hash>,
 	/// The runtime version of the finalized block.
 	///
 	/// # Note
@@ -135,12 +135,12 @@ impl<Hash: Serialize> Serialize for Initialized<Hash> {
 	{
 		if self.with_runtime {
 			let mut state = serializer.serialize_struct("Initialized", 2)?;
-			state.serialize_field("finalizedBlockHash", &self.finalized_block_hash)?;
+			state.serialize_field("finalizedBlockHashes", &self.finalized_block_hashes)?;
 			state.serialize_field("finalizedBlockRuntime", &self.finalized_block_runtime)?;
 			state.end()
 		} else {
 			let mut state = serializer.serialize_struct("Initialized", 1)?;
-			state.serialize_field("finalizedBlockHash", &self.finalized_block_hash)?;
+			state.serialize_field("finalizedBlockHashes", &self.finalized_block_hashes)?;
 			state.end()
 		}
 	}
@@ -348,13 +348,13 @@ mod tests {
 	fn follow_initialized_event_no_updates() {
 		// Runtime flag is false.
let event: FollowEvent = FollowEvent::Initialized(Initialized { - finalized_block_hash: "0x1".into(), + finalized_block_hashes: vec!["0x1".into()], finalized_block_runtime: None, with_runtime: false, }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"initialized","finalizedBlockHash":"0x1"}"#; + let exp = r#"{"event":"initialized","finalizedBlockHashes":["0x1"]}"#; assert_eq!(ser, exp); let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); @@ -373,7 +373,7 @@ mod tests { let runtime_event = RuntimeEvent::Valid(RuntimeVersionEvent { spec: runtime.into() }); let mut initialized = Initialized { - finalized_block_hash: "0x1".into(), + finalized_block_hashes: vec!["0x1".into()], finalized_block_runtime: Some(runtime_event), with_runtime: true, }; @@ -381,7 +381,7 @@ mod tests { let ser = serde_json::to_string(&event).unwrap(); let exp = concat!( - r#"{"event":"initialized","finalizedBlockHash":"0x1","#, + r#"{"event":"initialized","finalizedBlockHashes":["0x1"],"#, r#""finalizedBlockRuntime":{"type":"valid","spec":{"specName":"ABC","implName":"Impl","#, r#""specVersion":1,"implVersion":0,"apis":{},"transactionVersion":0}}}"#, ); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index d63a98a5cb0d93b3633864bd3405ad38c2b6e799..e81bd4bfa0b04aa3ad9e25e47e83e9ace1c7f985 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -63,7 +63,7 @@ impl ChainHeadMockClient { BlockImportNotification::new(header.hash(), BlockOrigin::Own, header, true, None, sink); for sink in self.import_sinks.lock().iter_mut() { - sink.unbounded_send(notification.clone()).unwrap(); + let _ = sink.unbounded_send(notification.clone()); } } @@ -83,7 +83,7 @@ impl ChainHeadMockClient { let notification = FinalityNotification::from_summary(summary, sink); for sink in self.finality_sinks.lock().iter_mut() { - sink.unbounded_send(notification.clone()).unwrap(); + let _ = sink.unbounded_send(notification.clone()); } } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 9544736d84c8e634613d0a7e363e9398f1aa0b9d..4d9dfb24e0a93bc45669ff1af4899eeb27694c18 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -173,7 +173,7 @@ async fn follow_subscription_produces_blocks() { // Initialized must always be reported first. 
let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); @@ -242,12 +242,12 @@ async fn follow_with_runtime() { let event: FollowEvent = get_next_event(&mut sub).await; // it is basically json-encoded substrate_test_runtime_client::runtime::VERSION - let runtime_str = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":0,\ - \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ + let runtime_str = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ + \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",5],\ [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":0}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; let runtime: RuntimeVersion = serde_json::from_str(runtime_str).unwrap(); @@ -255,7 +255,7 @@ async fn follow_with_runtime() { Some(RuntimeEvent::Valid(RuntimeVersionEvent { spec: runtime.clone().into() })); // Runtime must always be reported with the first event. let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime, with_runtime: false, }); @@ -1344,7 +1344,7 @@ async fn follow_generates_initial_blocks() { // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); @@ -1896,7 +1896,7 @@ async fn follow_prune_best_block() { // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); @@ -2081,6 +2081,7 @@ async fn follow_forks_pruned_block() { // ^^^ finalized // -> block 1 -> block 2_f -> block 3_f // + let finalized_hash = client.info().finalized_hash; let block_1 = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -2090,6 +2091,7 @@ async fn follow_forks_pruned_block() { .build() .unwrap() .block; + let block_1_hash = block_1.header.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); let block_2 = BlockBuilderBuilder::new(&*client) @@ -2100,6 +2102,7 @@ async fn follow_forks_pruned_block() { .build() .unwrap() .block; + let block_2_hash = block_2.header.hash(); client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); let block_3 = BlockBuilderBuilder::new(&*client) @@ -2156,7 +2159,12 @@ async fn follow_forks_pruned_block() { // Initialized must always be reported first. 
let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", block_3_hash), + finalized_block_hashes: vec![ + format!("{:?}", finalized_hash), + format!("{:?}", block_1_hash), + format!("{:?}", block_2_hash), + format!("{:?}", block_3_hash), + ], finalized_block_runtime: None, with_runtime: false, }); @@ -2310,7 +2318,7 @@ async fn follow_report_multiple_pruned_block() { // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); @@ -2632,7 +2640,7 @@ async fn follow_finalized_before_new_block() { let finalized_hash = client.info().finalized_hash; let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); diff --git a/substrate/client/rpc-spec-v2/src/transaction/event.rs b/substrate/client/rpc-spec-v2/src/transaction/event.rs index 8b80fcda17beb22cece35c928f10d72175fa9458..882ac8490b07c00432c4296964b7f995329df3dc 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/event.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/event.rs @@ -20,23 +20,6 @@ use serde::{Deserialize, Serialize}; -/// The transaction was broadcasted to a number of peers. -/// -/// # Note -/// -/// The RPC does not guarantee that the peers have received the -/// transaction. -/// -/// When the number of peers is zero, the event guarantees that -/// shutting down the local node will lead to the transaction -/// not being included in the chain. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionBroadcasted { - /// The number of peers the transaction was broadcasted to. - pub num_peers: usize, -} - /// The transaction was included in a block of the chain. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -59,9 +42,6 @@ pub struct TransactionError { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionDropped { - /// True if the transaction was broadcasted to other peers and - /// may still be included in the block. - pub broadcasted: bool, /// Reason of the event. pub error: String, } @@ -70,20 +50,17 @@ pub struct TransactionDropped { /// /// The status events can be grouped based on their kinds as: /// -/// 1. Runtime validated the transaction: +/// 1. Runtime validated the transaction and it entered the pool: /// - `Validated` /// -/// 2. Inside the `Ready` queue: -/// - `Broadcast` -/// -/// 3. Leaving the pool: +/// 2. Leaving the pool: /// - `BestChainBlockIncluded` /// - `Invalid` /// -/// 4. Block finalized: +/// 3. Block finalized: /// - `Finalized` /// -/// 5. At any time: +/// 4. At any time: /// - `Dropped` /// - `Error` /// @@ -101,8 +78,6 @@ pub struct TransactionDropped { pub enum TransactionEvent { /// The transaction was validated by the runtime. Validated, - /// The transaction was broadcasted to a number of peers. - Broadcasted(TransactionBroadcasted), /// The transaction was included in a best block of the chain. 
/// /// # Note @@ -159,7 +134,6 @@ enum TransactionEventBlockIR { #[serde(tag = "event")] enum TransactionEventNonBlockIR { Validated, - Broadcasted(TransactionBroadcasted), Error(TransactionError), Invalid(TransactionError), Dropped(TransactionDropped), @@ -186,8 +160,6 @@ impl From> for TransactionEventIR { match value { TransactionEvent::Validated => TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated), - TransactionEvent::Broadcasted(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)), TransactionEvent::BestChainBlockIncluded(event) => TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)), TransactionEvent::Finalized(event) => @@ -207,8 +179,6 @@ impl From> for TransactionEvent { match value { TransactionEventIR::NonBlock(status) => match status { TransactionEventNonBlockIR::Validated => TransactionEvent::Validated, - TransactionEventNonBlockIR::Broadcasted(event) => - TransactionEvent::Broadcasted(event), TransactionEventNonBlockIR::Error(event) => TransactionEvent::Error(event), TransactionEventNonBlockIR::Invalid(event) => TransactionEvent::Invalid(event), TransactionEventNonBlockIR::Dropped(event) => TransactionEvent::Dropped(event), @@ -239,19 +209,6 @@ mod tests { assert_eq!(event_dec, event); } - #[test] - fn broadcasted_event() { - let event: TransactionEvent<()> = - TransactionEvent::Broadcasted(TransactionBroadcasted { num_peers: 2 }); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"broadcasted","numPeers":2}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } - #[test] fn best_chain_event() { let event: TransactionEvent<()> = TransactionEvent::BestChainBlockIncluded(None); @@ -320,13 +277,11 @@ mod tests { #[test] fn dropped_event() { - let event: TransactionEvent<()> = TransactionEvent::Dropped(TransactionDropped { - broadcasted: true, - error: "abc".to_string(), - }); + let event: TransactionEvent<()> = + TransactionEvent::Dropped(TransactionDropped { error: "abc".to_string() }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"dropped","broadcasted":true,"error":"abc"}"#; + let exp = r#"{"event":"dropped","error":"abc"}"#; assert_eq!(ser, exp); let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/transaction/mod.rs b/substrate/client/rpc-spec-v2/src/transaction/mod.rs index 74268a5372a37a7f40d79e50d2ecf6659315d534..514ccf047dc28570c78543755577661a6a7791af 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/mod.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/mod.rs @@ -35,9 +35,6 @@ pub mod transaction; pub mod transaction_broadcast; pub use api::{TransactionApiServer, TransactionBroadcastApiServer}; -pub use event::{ - TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, - TransactionEvent, -}; +pub use event::{TransactionBlock, TransactionDropped, TransactionError, TransactionEvent}; pub use transaction::Transaction; pub use transaction_broadcast::TransactionBroadcast; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests.rs deleted file mode 100644 index 382f5adeae19ebd7da366036b5092b0208b62568..0000000000000000000000000000000000000000 --- a/substrate/client/rpc-spec-v2/src/transaction/tests.rs +++ /dev/null @@ -1,238 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::*; -use crate::{ - chain_head::test_utils::ChainHeadMockClient, hex_string, - transaction::TransactionBroadcast as RpcTransactionBroadcast, -}; -use assert_matches::assert_matches; -use codec::Encode; -use futures::Future; -use jsonrpsee::{rpc_params, MethodsError as Error, RpcModule}; -use sc_transaction_pool::*; -use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; -use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; -use std::{pin::Pin, sync::Arc, time::Duration}; -use substrate_test_runtime_client::{prelude::*, AccountKeyring::*, Client}; -use substrate_test_runtime_transaction_pool::{uxt, TestApi}; -use tokio::sync::mpsc; - -type Block = substrate_test_runtime_client::runtime::Block; - -/// Wrap the `TaskExecutor` to know when the broadcast future is dropped. -#[derive(Clone)] -struct TaskExecutorBroadcast { - executor: TaskExecutor, - sender: mpsc::UnboundedSender<()>, -} - -/// The channel that receives events when the broadcast futures are dropped. -type TaskExecutorRecv = mpsc::UnboundedReceiver<()>; - -impl TaskExecutorBroadcast { - /// Construct a new `TaskExecutorBroadcast` and a receiver to know when the broadcast futures - /// are dropped. - fn new() -> (Self, TaskExecutorRecv) { - let (sender, recv) = mpsc::unbounded_channel(); - - (Self { executor: TaskExecutor::new(), sender }, recv) - } -} - -impl SpawnNamed for TaskExecutorBroadcast { - fn spawn( - &self, - name: &'static str, - group: Option<&'static str>, - future: futures::future::BoxFuture<'static, ()>, - ) { - let sender = self.sender.clone(); - let future = Box::pin(async move { - future.await; - let _ = sender.send(()); - }); - - self.executor.spawn(name, group, future) - } - - fn spawn_blocking( - &self, - name: &'static str, - group: Option<&'static str>, - future: futures::future::BoxFuture<'static, ()>, - ) { - let sender = self.sender.clone(); - let future = Box::pin(async move { - future.await; - let _ = sender.send(()); - }); - - self.executor.spawn_blocking(name, group, future) - } -} - -/// Initial Alice account nonce. -const ALICE_NONCE: u64 = 209; - -fn create_basic_pool_with_genesis( - test_api: Arc, -) -> (BasicPool, Pin + Send>>) { - let genesis_hash = { - test_api - .chain() - .read() - .block_by_number - .get(&0) - .map(|blocks| blocks[0].0.header.hash()) - .expect("there is block 0. 
qed") - }; - BasicPool::new_test(test_api, genesis_hash, genesis_hash) -} - -fn maintained_pool() -> (BasicPool, Arc, futures::executor::ThreadPool) { - let api = Arc::new(TestApi::with_alice_nonce(ALICE_NONCE)); - let (pool, background_task) = create_basic_pool_with_genesis(api.clone()); - - let thread_pool = futures::executor::ThreadPool::new().unwrap(); - thread_pool.spawn_ok(background_task); - (pool, api, thread_pool) -} - -fn setup_api() -> ( - Arc, - Arc>, - Arc>>, - RpcModule< - TransactionBroadcast, ChainHeadMockClient>>, - >, - TaskExecutorRecv, -) { - let (pool, api, _) = maintained_pool(); - let pool = Arc::new(pool); - - let builder = TestClientBuilder::new(); - let client = Arc::new(builder.build()); - let client_mock = Arc::new(ChainHeadMockClient::new(client.clone())); - - let (task_executor, executor_recv) = TaskExecutorBroadcast::new(); - - let tx_api = - RpcTransactionBroadcast::new(client_mock.clone(), pool.clone(), Arc::new(task_executor)) - .into_rpc(); - - (api, pool, client_mock, tx_api, executor_recv) -} - -#[tokio::test] -async fn tx_broadcast_enters_pool() { - let (api, pool, client_mock, tx_api, _) = setup_api(); - - // Start at block 1. - let block_1_header = api.push_block(1, vec![], true); - - let uxt = uxt(Alice, ALICE_NONCE); - let xt = hex_string(&uxt.encode()); - - let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); - - // Announce block 1 to `transaction_unstable_broadcast`. - client_mock.trigger_import_stream(block_1_header).await; - - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. - - // TODO: Improve testability by extending the `transaction_unstable_broadcast` with - // a middleware trait that intercepts the transaction status for testing. - let mut num_retries = 12; - while num_retries > 0 && pool.status().ready != 1 { - tokio::time::sleep(Duration::from_secs(5)).await; - num_retries -= 1; - } - assert_eq!(1, pool.status().ready); - assert_eq!(uxt.encode().len(), pool.status().ready_bytes); - - // Import block 2 with the transaction included. - let block_2_header = api.push_block(2, vec![uxt.clone()], true); - let block_2 = block_2_header.hash(); - - // Announce block 2 to the pool. - let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None }; - pool.maintain(event).await; - - assert_eq!(0, pool.status().ready); - - // Stop call can still be made. - let _: () = tx_api - .call("transaction_unstable_stop", rpc_params![&operation_id]) - .await - .unwrap(); -} - -#[tokio::test] -async fn tx_broadcast_invalid_tx() { - let (_, pool, _, tx_api, mut exec_recv) = setup_api(); - - // Invalid parameters. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_broadcast", [1u8]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" - ); - - assert_eq!(0, pool.status().ready); - - // Invalid transaction that cannot be decoded. The broadcast silently exits. - let xt = "0xdeadbeef"; - let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); - - assert_eq!(0, pool.status().ready); - - // Await the broadcast future to exit. - // Without this we'd be subject to races, where we try to call the stop before the tx is - // dropped. - exec_recv.recv().await.unwrap(); - - // The broadcast future was dropped, and the operation is no longer active. 
- // When the operation is not active, either from the tx being finalized or a - // terminal error; the stop method should return an error. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" - ); -} - -#[tokio::test] -async fn tx_invalid_stop() { - let (_, _, _, tx_api, _) = setup_api(); - - // Make an invalid stop call. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", ["invalid_operation_id"]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" - ); -} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs new file mode 100644 index 0000000000000000000000000000000000000000..ff9aca79887c3f29025697f9f5832c914735e0d6 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; +use std::sync::{atomic::AtomicUsize, Arc}; +use tokio::sync::mpsc; + +/// Wrap the `TaskExecutor` to know when the broadcast future is dropped. +#[derive(Clone)] +pub struct TaskExecutorBroadcast { + executor: TaskExecutor, + sender: mpsc::UnboundedSender<()>, + num_tasks: Arc, +} + +/// The channel that receives events when the broadcast futures are dropped. +pub type TaskExecutorRecv = mpsc::UnboundedReceiver<()>; + +/// The state of the `TaskExecutorBroadcast`. +pub struct TaskExecutorState { + pub recv: TaskExecutorRecv, + pub num_tasks: Arc, +} + +impl TaskExecutorState { + pub fn num_tasks(&self) -> usize { + self.num_tasks.load(std::sync::atomic::Ordering::Acquire) + } +} + +impl TaskExecutorBroadcast { + /// Construct a new `TaskExecutorBroadcast` and a receiver to know when the broadcast futures + /// are dropped. 
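+	/// The returned [`TaskExecutorState`] additionally tracks how many spawned tasks
+	/// are currently running.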
+ pub fn new() -> (Self, TaskExecutorState) { + let (sender, recv) = mpsc::unbounded_channel(); + let num_tasks = Arc::new(AtomicUsize::new(0)); + + ( + Self { executor: TaskExecutor::new(), sender, num_tasks: num_tasks.clone() }, + TaskExecutorState { recv, num_tasks }, + ) + } +} + +impl SpawnNamed for TaskExecutorBroadcast { + fn spawn( + &self, + name: &'static str, + group: Option<&'static str>, + future: futures::future::BoxFuture<'static, ()>, + ) { + let sender = self.sender.clone(); + let num_tasks = self.num_tasks.clone(); + + let future = Box::pin(async move { + num_tasks.fetch_add(1, std::sync::atomic::Ordering::AcqRel); + future.await; + num_tasks.fetch_sub(1, std::sync::atomic::Ordering::AcqRel); + + let _ = sender.send(()); + }); + + self.executor.spawn(name, group, future) + } + + fn spawn_blocking( + &self, + name: &'static str, + group: Option<&'static str>, + future: futures::future::BoxFuture<'static, ()>, + ) { + let sender = self.sender.clone(); + let num_tasks = self.num_tasks.clone(); + + let future = Box::pin(async move { + num_tasks.fetch_add(1, std::sync::atomic::Ordering::AcqRel); + future.await; + num_tasks.fetch_sub(1, std::sync::atomic::Ordering::AcqRel); + + let _ = sender.send(()); + }); + + self.executor.spawn_blocking(name, group, future) + } +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs new file mode 100644 index 0000000000000000000000000000000000000000..aa8ac572dec9d8d6de9212fcb94c97ce2b804da4 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs @@ -0,0 +1,187 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use codec::Encode; +use futures::Future; +use sc_transaction_pool::BasicPool; +use sc_transaction_pool_api::{ + ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, + TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, +}; + +use crate::hex_string; +use futures::{FutureExt, StreamExt}; + +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{collections::HashMap, pin::Pin, sync::Arc}; +use substrate_test_runtime_transaction_pool::TestApi; +use tokio::sync::mpsc; + +pub type Block = substrate_test_runtime_client::runtime::Block; + +pub type TxTestPool = MiddlewarePool; +pub type TxStatusType = sc_transaction_pool_api::TransactionStatus< + sc_transaction_pool_api::TxHash, + sc_transaction_pool_api::BlockHash, +>; +pub type TxStatusTypeTest = TxStatusType; + +/// The type of the event that the middleware captures. 
+#[derive(Debug, PartialEq)]
+pub enum MiddlewarePoolEvent {
+	TransactionStatus {
+		transaction: String,
+		status: sc_transaction_pool_api::TransactionStatus<
+			<Block as BlockT>::Hash,
+			<Block as BlockT>::Hash,
+		>,
+	},
+	PoolError {
+		transaction: String,
+		err: String,
+	},
+}
+
+/// The channel that receives the events generated by the middleware pool.
+pub type MiddlewarePoolRecv = mpsc::UnboundedReceiver<MiddlewarePoolEvent>;
+
+/// Add a middleware to the transaction pool.
+///
+/// This wraps `submit_and_watch` to gain access to the transaction status events.
+pub struct MiddlewarePool {
+	pub inner_pool: Arc<BasicPool<TestApi, Block>>,
+	/// Send the middleware events to the test.
+	sender: mpsc::UnboundedSender<MiddlewarePoolEvent>,
+}
+
+impl MiddlewarePool {
+	/// Construct a new [`MiddlewarePool`].
+	pub fn new(pool: Arc<BasicPool<TestApi, Block>>) -> (Self, MiddlewarePoolRecv) {
+		let (sender, recv) = mpsc::unbounded_channel();
+		(MiddlewarePool { inner_pool: pool, sender }, recv)
+	}
+}
+
+impl TransactionPool for MiddlewarePool {
+	type Block = <BasicPool<TestApi, Block> as TransactionPool>::Block;
+	type Hash = <BasicPool<TestApi, Block> as TransactionPool>::Hash;
+	type InPoolTransaction = <BasicPool<TestApi, Block> as TransactionPool>::InPoolTransaction;
+	type Error = <BasicPool<TestApi, Block> as TransactionPool>::Error;
+
+	fn submit_at(
+		&self,
+		at: <Self::Block as BlockT>::Hash,
+		source: TransactionSource,
+		xts: Vec<TransactionFor<Self>>,
+	) -> PoolFuture<Vec<Result<TxHash<Self>, Self::Error>>, Self::Error> {
+		self.inner_pool.submit_at(at, source, xts)
+	}
+
+	fn submit_one(
+		&self,
+		at: <Self::Block as BlockT>::Hash,
+		source: TransactionSource,
+		xt: TransactionFor<Self>,
+	) -> PoolFuture<TxHash<Self>, Self::Error> {
+		self.inner_pool.submit_one(at, source, xt)
+	}
+
+	fn submit_and_watch(
+		&self,
+		at: <Self::Block as BlockT>::Hash,
+		source: TransactionSource,
+		xt: TransactionFor<Self>,
+	) -> PoolFuture<Pin<Box<TransactionStatusStreamFor<Self>>>, Self::Error> {
+		let pool = self.inner_pool.clone();
+		let sender = self.sender.clone();
+		let transaction = hex_string(&xt.encode());
+
+		async move {
+			let watcher = match pool.submit_and_watch(at, source, xt).await {
+				Ok(watcher) => watcher,
+				Err(err) => {
+					let _ = sender.send(MiddlewarePoolEvent::PoolError {
+						transaction: transaction.clone(),
+						err: err.to_string(),
+					});
+					return Err(err);
+				},
+			};
+
+			let watcher = watcher.map(move |status| {
+				let sender = sender.clone();
+				let transaction = transaction.clone();
+
+				let _ = sender.send(MiddlewarePoolEvent::TransactionStatus {
+					transaction,
+					status: status.clone(),
+				});
+
+				status
+			});
+
+			Ok(watcher.boxed())
+		}
+		.boxed()
+	}
+
+	fn remove_invalid(&self, hashes: &[TxHash<Self>]) -> Vec<Arc<Self::InPoolTransaction>> {
+		self.inner_pool.remove_invalid(hashes)
+	}
+
+	fn status(&self) -> PoolStatus {
+		self.inner_pool.status()
+	}
+
+	fn import_notification_stream(&self) -> ImportNotificationStream<TxHash<Self>> {
+		self.inner_pool.import_notification_stream()
+	}
+
+	fn hash_of(&self, xt: &TransactionFor<Self>) -> TxHash<Self> {
+		self.inner_pool.hash_of(xt)
+	}
+
+	fn on_broadcasted(&self, propagations: HashMap<TxHash<Self>, Vec<String>>) {
+		self.inner_pool.on_broadcasted(propagations)
+	}
+
+	fn ready_transaction(&self, hash: &TxHash<Self>) -> Option<Arc<Self::InPoolTransaction>> {
+		self.inner_pool.ready_transaction(hash)
+	}
+
+	fn ready_at(
+		&self,
+		at: NumberFor<Self::Block>,
+	) -> Pin<
+		Box<
+			dyn Future<
+				Output = Box<dyn ReadyTransactions<Item = Arc<Self::InPoolTransaction>> + Send>,
+			> + Send,
+		>,
+	> {
+		self.inner_pool.ready_at(at)
+	}
+
+	fn ready(&self) -> Box<dyn ReadyTransactions<Item = Arc<Self::InPoolTransaction>> + Send> {
+		self.inner_pool.ready()
+	}
+
+	fn futures(&self) -> Vec<Self::InPoolTransaction> {
+		self.inner_pool.futures()
+	}
+}
diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ab0caaf906fd08049ccf6c9f421d3134d3689322
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs
@@ -0,0 +1,24 @@
+// This file is part of
Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+mod executor;
+mod middleware_pool;
+#[macro_use]
+mod setup;
+
+mod transaction_broadcast_tests;
diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs
new file mode 100644
index 0000000000000000000000000000000000000000..04ee7b9b4c94c4f3eac763ad06ba9b8964c2287e
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs
@@ -0,0 +1,120 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::{
+	chain_head::test_utils::ChainHeadMockClient,
+	transaction::{
+		api::TransactionBroadcastApiServer,
+		tests::executor::{TaskExecutorBroadcast, TaskExecutorState},
+		TransactionBroadcast as RpcTransactionBroadcast,
+	},
+};
+use futures::Future;
+use jsonrpsee::RpcModule;
+use sc_transaction_pool::*;
+use std::{pin::Pin, sync::Arc};
+use substrate_test_runtime_client::{prelude::*, Client};
+use substrate_test_runtime_transaction_pool::TestApi;
+
+use crate::transaction::tests::middleware_pool::{MiddlewarePool, MiddlewarePoolRecv};
+
+pub type Block = substrate_test_runtime_client::runtime::Block;
+
+/// Initial Alice account nonce.
+pub const ALICE_NONCE: u64 = 209;
+
+fn create_basic_pool_with_genesis(
+	test_api: Arc<TestApi>,
+	options: Options,
+) -> (BasicPool<TestApi, Block>, Pin<Box<dyn Future<Output = ()> + Send>>) {
+	let genesis_hash = {
+		test_api
+			.chain()
+			.read()
+			.block_by_number
+			.get(&0)
+			.map(|blocks| blocks[0].0.header.hash())
+			.expect("there is block 0. qed")
+	};
+	BasicPool::new_test(test_api, genesis_hash, genesis_hash, options)
+}
+
+fn maintained_pool(
+	options: Options,
+) -> (BasicPool<TestApi, Block>, Arc<TestApi>, futures::executor::ThreadPool) {
+	let api = Arc::new(TestApi::with_alice_nonce(ALICE_NONCE));
+	let (pool, background_task) = create_basic_pool_with_genesis(api.clone(), options);
+
+	let thread_pool = futures::executor::ThreadPool::new().unwrap();
+	thread_pool.spawn_ok(background_task);
+	(pool, api, thread_pool)
+}
+
+pub fn setup_api(
+	options: Options,
+) -> (
+	Arc<TestApi>,
+	Arc<MiddlewarePool>,
+	Arc<ChainHeadMockClient<Client<Backend>>>,
+	RpcModule<RpcTransactionBroadcast<MiddlewarePool, ChainHeadMockClient<Client<Backend>>>>,
+	TaskExecutorState,
+	MiddlewarePoolRecv,
+) {
+	let (pool, api, _) = maintained_pool(options);
+	let (pool, pool_state) = MiddlewarePool::new(Arc::new(pool).clone());
+	let pool = Arc::new(pool);
+
+	let builder = TestClientBuilder::new();
+	let client = Arc::new(builder.build());
+	let client_mock = Arc::new(ChainHeadMockClient::new(client.clone()));
+
+	let (task_executor, executor_recv) = TaskExecutorBroadcast::new();
+
+	let tx_api =
+		RpcTransactionBroadcast::new(client_mock.clone(), pool.clone(), Arc::new(task_executor))
+			.into_rpc();
+
+	(api, pool, client_mock, tx_api, executor_recv, pool_state)
+}
+
+/// Get the next event from the provided middleware in at most 5 seconds.
+macro_rules! get_next_event {
+	($middleware:expr) => {
+		tokio::time::timeout(std::time::Duration::from_secs(5), $middleware.recv())
+			.await
+			.unwrap()
+			.unwrap()
+	};
+}
+
+/// Collect the next `$num` transaction events from the provided middleware.
+macro_rules! get_next_tx_events {
+	($middleware:expr, $num:expr) => {{
+		let mut events = std::collections::HashMap::new();
+		for _ in 0..$num {
+			let event = get_next_event!($middleware);
+			match event {
+				crate::transaction::tests::middleware_pool::MiddlewarePoolEvent::TransactionStatus { transaction, status } => {
+					events.entry(transaction).or_insert_with(|| vec![]).push(status);
+				},
+				other => panic!("Expected TransactionStatus, received {:?}", other),
+			};
+		}
+		events
+	}};
+}
diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..690a1a64d7460c656f5180c59120c951dfb04472
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs
@@ -0,0 +1,523 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +use crate::{hex_string, transaction::error::json_rpc_spec}; +use assert_matches::assert_matches; +use codec::Encode; +use jsonrpsee::{rpc_params, MethodsError as Error}; +use sc_transaction_pool::{Options, PoolLimit}; +use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; +use std::sync::Arc; +use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +// Test helpers. +use crate::transaction::tests::{ + middleware_pool::{MiddlewarePoolEvent, TxStatusTypeTest}, + setup::{setup_api, ALICE_NONCE}, +}; + +#[tokio::test] +async fn tx_broadcast_enters_pool() { + let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(Default::default()); + + // Start at block 1. + let block_1_header = api.push_block(1, vec![], true); + + let uxt = uxt(Alice, ALICE_NONCE); + let xt = hex_string(&uxt.encode()); + + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Ready + } + ); + + assert_eq!(1, pool.inner_pool.status().ready); + assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes); + + // Import block 2 with the transaction included. + let block_2_header = api.push_block(2, vec![uxt.clone()], true); + let block_2 = block_2_header.hash(); + + // Announce block 2 to the pool. + let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None }; + pool.inner_pool.maintain(event).await; + assert_eq!(0, pool.inner_pool.status().ready); + + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::InBlock((block_2, 0)) + } + ); + + // The future broadcast awaits for the finalized status to be reached. + // Force the future to exit by calling stop. + let _: () = tx_api + .call("transaction_unstable_stop", rpc_params![&operation_id]) + .await + .unwrap(); + + // Ensure the broadcast future finishes. + let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); +} + +#[tokio::test] +async fn tx_broadcast_invalid_tx() { + let (_, pool, _, tx_api, mut exec_middleware, _) = setup_api(Default::default()); + + // Invalid parameters. + let err = tx_api + .call::<_, serde_json::Value>("transaction_unstable_broadcast", [1u8]) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" + ); + + assert_eq!(0, pool.status().ready); + + // Invalid transaction that cannot be decoded. The broadcast silently exits. + let xt = "0xdeadbeef"; + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + assert_eq!(0, pool.status().ready); + + // Await the broadcast future to exit. + // Without this we'd be subject to races, where we try to call the stop before the tx is + // dropped. 
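The race described in the comment above is avoided by consuming one task-exit notification per broadcast future before probing the stop method. A minimal sketch of that wait, assuming the `TaskExecutorState` helper from `executor.rs` (the function name is illustrative):

```rust
use crate::transaction::tests::executor::TaskExecutorState;

/// Wait until one broadcast future has finished and assert that no other
/// tasks remain; only then is it safe to call `transaction_unstable_stop`.
async fn wait_for_broadcast_exit(exec: &mut TaskExecutorState) {
    // One `()` message is emitted per completed broadcast future.
    let _ = exec.recv.recv().await;
    assert_eq!(0, exec.num_tasks());
}
```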
+	let _ = get_next_event!(&mut exec_middleware.recv);
+	assert_eq!(0, exec_middleware.num_tasks());
+
+	// The broadcast future was dropped, and the operation is no longer active.
+	// When the operation is not active, either because the tx was finalized or because of a
+	// terminal error, the stop method should return an error.
+	let err = tx_api
+		.call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id])
+		.await
+		.unwrap_err();
+	assert_matches!(err,
+		Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id"
+	);
+}
+
+#[tokio::test]
+async fn tx_stop_with_invalid_operation_id() {
+	let (_, _, _, tx_api, _, _) = setup_api(Default::default());
+
+	// Make an invalid stop call.
+	let err = tx_api
+		.call::<_, serde_json::Value>("transaction_unstable_stop", ["invalid_operation_id"])
+		.await
+		.unwrap_err();
+	assert_matches!(err,
+		Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id"
+	);
+}
+
+#[tokio::test]
+async fn tx_broadcast_resubmits_future_nonce_tx() {
+	let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) =
+		setup_api(Default::default());
+
+	// Start at block 1.
+	let block_1_header = api.push_block(1, vec![], true);
+	let block_1 = block_1_header.hash();
+
+	let current_uxt = uxt(Alice, ALICE_NONCE);
+	let current_xt = hex_string(&current_uxt.encode());
+	// This lives in the future.
+	let future_uxt = uxt(Alice, ALICE_NONCE + 1);
+	let future_xt = hex_string(&future_uxt.encode());
+
+	let future_operation_id: String = tx_api
+		.call("transaction_unstable_broadcast", rpc_params![&future_xt])
+		.await
+		.unwrap();
+
+	// Announce block 1 to `transaction_unstable_broadcast`.
+	client_mock.trigger_import_stream(block_1_header).await;
+
+	// Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool.
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: future_xt.clone(),
+			status: TxStatusTypeTest::Future
+		}
+	);
+
+	let event = ChainEvent::NewBestBlock { hash: block_1, tree_route: None };
+	pool.inner_pool.maintain(event).await;
+	assert_eq!(0, pool.inner_pool.status().ready);
+	// Ensure the tx is in the future.
+	assert_eq!(1, pool.inner_pool.status().future);
+
+	let block_2_header = api.push_block(2, vec![], true);
+	let block_2 = block_2_header.hash();
+
+	let operation_id: String = tx_api
+		.call("transaction_unstable_broadcast", rpc_params![&current_xt])
+		.await
+		.unwrap();
+	assert_ne!(future_operation_id, operation_id);
+
+	// Announce block 2 to `transaction_unstable_broadcast`.
+	client_mock.trigger_import_stream(block_2_header).await;
+
+	// Collect the events of both transactions.
+	let events = get_next_tx_events!(&mut pool_middleware, 2);
+	// Transactions entered the ready queue.
+	assert_eq!(events.get(&current_xt).unwrap(), &vec![TxStatusTypeTest::Ready]);
+	assert_eq!(events.get(&future_xt).unwrap(), &vec![TxStatusTypeTest::Ready]);
+
+	let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None };
+	pool.inner_pool.maintain(event).await;
+	assert_eq!(2, pool.inner_pool.status().ready);
+	assert_eq!(0, pool.inner_pool.status().future);
+
+	// Finalize transactions.
+	let block_3_header = api.push_block(3, vec![current_uxt, future_uxt], true);
+	let block_3 = block_3_header.hash();
+	client_mock.trigger_import_stream(block_3_header).await;
+
+	let event = ChainEvent::Finalized { hash: block_3, tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+	assert_eq!(0, pool.inner_pool.status().ready);
+	assert_eq!(0, pool.inner_pool.status().future);
+
+	let events = get_next_tx_events!(&mut pool_middleware, 4);
+	assert_eq!(
+		events.get(&current_xt).unwrap(),
+		&vec![TxStatusTypeTest::InBlock((block_3, 0)), TxStatusTypeTest::Finalized((block_3, 0))]
+	);
+	assert_eq!(
+		events.get(&future_xt).unwrap(),
+		&vec![TxStatusTypeTest::InBlock((block_3, 1)), TxStatusTypeTest::Finalized((block_3, 1))]
+	);
+
+	// Both broadcast futures must exit.
+	let _ = get_next_event!(&mut exec_middleware.recv);
+	let _ = get_next_event!(&mut exec_middleware.recv);
+	assert_eq!(0, exec_middleware.num_tasks());
+}
+
+/// This test is similar to `tx_broadcast_enters_pool`.
+/// However, the last block is announced as finalized to force the
+/// broadcast future to exit before the `stop` is called.
+#[tokio::test]
+async fn tx_broadcast_stop_after_broadcast_finishes() {
+	let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) =
+		setup_api(Default::default());
+
+	// Start at block 1.
+	let block_1_header = api.push_block(1, vec![], true);
+
+	let uxt = uxt(Alice, ALICE_NONCE);
+	let xt = hex_string(&uxt.encode());
+
+	let operation_id: String =
+		tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+
+	// Announce block 1 to `transaction_unstable_broadcast`.
+	client_mock.trigger_import_stream(block_1_header).await;
+
+	// Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction
+	// pool.
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::Ready
+		}
+	);
+
+	assert_eq!(1, pool.inner_pool.status().ready);
+	assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes);
+
+	// Import block 2 with the transaction included.
+	let block_2_header = api.push_block(2, vec![uxt.clone()], true);
+	let block_2 = block_2_header.hash();
+
+	// Announce block 2 to the pool.
+	let event = ChainEvent::Finalized { hash: block_2, tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+
+	assert_eq!(0, pool.inner_pool.status().ready);
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::InBlock((block_2, 0))
+		}
+	);
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::Finalized((block_2, 0))
+		}
+	);
+
+	// Ensure the broadcast future terminated properly.
+	let _ = get_next_event!(&mut exec_middleware.recv);
+	assert_eq!(0, exec_middleware.num_tasks());
+
+	// The operation ID is no longer valid, check that the broadcast future
+	// cleared out the inner state of the operation.
+	let err = tx_api
+		.call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id])
+		.await
+		.unwrap_err();
+	assert_matches!(err,
+		Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id"
+	);
+}
+
+#[tokio::test]
+async fn tx_broadcast_resubmits_invalid_tx() {
+	let limits = PoolLimit { count: 8192, total_bytes: 20 * 1024 * 1024 };
+	let options = Options {
+		ready: limits.clone(),
+		future: limits,
+		reject_future_transactions: false,
+		// This ensures that a transaction is not banned.
+		ban_time: std::time::Duration::ZERO,
+	};
+
+	let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) =
+		setup_api(options);
+
+	let uxt = uxt(Alice, ALICE_NONCE);
+	let xt = hex_string(&uxt.encode());
+	let _operation_id: String =
+		tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+
+	let block_1_header = api.push_block(1, vec![], true);
+	let block_1 = block_1_header.hash();
+	// Announce block 1 to `transaction_unstable_broadcast`.
+	client_mock.trigger_import_stream(block_1_header).await;
+
+	// Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool.
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::Ready,
+		}
+	);
+	assert_eq!(1, pool.inner_pool.status().ready);
+	assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes);
+
+	// Mark the transaction as invalid from the API, causing a temporary ban.
+	api.add_invalid(&uxt);
+
+	// Push an event to the pool to ensure the transaction is excluded.
+	let event = ChainEvent::NewBestBlock { hash: block_1, tree_route: None };
+	pool.inner_pool.maintain(event).await;
+	assert_eq!(1, pool.inner_pool.status().ready);
+
+	// Ensure the `transaction_unstable_broadcast` is aware of the invalid transaction.
+	let event = get_next_event!(&mut pool_middleware);
+	// Because we have received an `Invalid` status, we try to broadcast the transaction with the
+	// next announced block.
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::Invalid
+		}
+	);
+
+	// Import block 2.
+	let block_2_header = api.push_block(2, vec![], true);
+	client_mock.trigger_import_stream(block_2_header).await;
+
+	// Ensure we propagate the temporary ban error to `submit_and_watch`.
+	// This ensures we'll loop again with the next announced block and try to resubmit the
+	// transaction. The transaction remains temporarily banned until the pool is maintained.
+	let event = get_next_event!(&mut pool_middleware);
+	assert_matches!(event, MiddlewarePoolEvent::PoolError { transaction, err } if transaction == xt && err.contains("Transaction temporarily Banned"));
+
+	// Import block 3.
+	let block_3_header = api.push_block(3, vec![], true);
+	let block_3 = block_3_header.hash();
+	// Remove the invalid transaction from the pool to allow it to pass through.
+	api.remove_invalid(&uxt);
+	let event = ChainEvent::NewBestBlock { hash: block_3, tree_route: None };
+	// We have to maintain the pool to ensure the transaction is no longer invalid.
+	// This clears out the banned transactions.
+	pool.inner_pool.maintain(event).await;
+	assert_eq!(0, pool.inner_pool.status().ready);
+
+	// Announce block to `transaction_unstable_broadcast`.
+	client_mock.trigger_import_stream(block_3_header).await;
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::Ready,
+		}
+	);
+	assert_eq!(1, pool.inner_pool.status().ready);
+
+	let block_4_header = api.push_block(4, vec![uxt], true);
+	let block_4 = block_4_header.hash();
+	let event = ChainEvent::Finalized { hash: block_4, tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::InBlock((block_4, 0)),
+		}
+	);
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::Finalized((block_4, 0)),
+		}
+	);
+
+	// Ensure the broadcast future terminated properly.
+	let _ = get_next_event!(&mut exec_middleware.recv);
+	assert_eq!(0, exec_middleware.num_tasks());
+}
+
+/// This is similar to `tx_broadcast_resubmits_invalid_tx`.
+/// However, it forces the tx to be resubmitted because of the pool
+/// limits, which is a different code path than the invalid tx.
+#[tokio::test]
+async fn tx_broadcast_resubmits_dropped_tx() {
+	let limits = PoolLimit { count: 1, total_bytes: 1000 };
+	let options = Options {
+		ready: limits.clone(),
+		future: limits,
+		reject_future_transactions: false,
+		// This ensures that a transaction is not banned.
+		ban_time: std::time::Duration::ZERO,
+	};
+
+	let (api, pool, client_mock, tx_api, _, mut pool_middleware) = setup_api(options);
+
+	let current_uxt = uxt(Alice, ALICE_NONCE);
+	let current_xt = hex_string(&current_uxt.encode());
+	// This lives in the future.
+	let future_uxt = uxt(Alice, ALICE_NONCE + 1);
+	let future_xt = hex_string(&future_uxt.encode());
+
+	// By default the `validate_transaction` mock uses priority 1 for
+	// transactions. Bump the priority to ensure other transactions
+	// are immediately dropped.
+	api.set_priority(&current_uxt, 10);
+
+	let current_operation_id: String = tx_api
+		.call("transaction_unstable_broadcast", rpc_params![&current_xt])
+		.await
+		.unwrap();
+
+	// Announce block 1 to `transaction_unstable_broadcast`.
+	let block_1_header = api.push_block(1, vec![], true);
+	let event =
+		ChainEvent::Finalized { hash: block_1_header.hash(), tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+	client_mock.trigger_import_stream(block_1_header).await;
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: current_xt.clone(),
+			status: TxStatusTypeTest::Ready,
+		}
+	);
+	assert_eq!(1, pool.inner_pool.status().ready);
+
+	// The future tx has priority 2, smaller than the current 10.
+	api.set_priority(&future_uxt, 2);
+	let future_operation_id: String = tx_api
+		.call("transaction_unstable_broadcast", rpc_params![&future_xt])
+		.await
+		.unwrap();
+	assert_ne!(current_operation_id, future_operation_id);
+
+	let block_2_header = api.push_block(2, vec![], true);
+	let event =
+		ChainEvent::Finalized { hash: block_2_header.hash(), tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+	client_mock.trigger_import_stream(block_2_header).await;
+
+	// We must have at most 1 transaction in the pool, as per limits above.
+	assert_eq!(1, pool.inner_pool.status().ready);
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::PoolError {
+			transaction: future_xt.clone(),
+			err: "Transaction couldn't enter the pool because of the limit".into()
+		}
+	);
+
+	let block_3_header = api.push_block(3, vec![current_uxt], true);
+	let event =
+		ChainEvent::Finalized { hash: block_3_header.hash(), tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+	client_mock.trigger_import_stream(block_3_header.clone()).await;
+
+	// The first tx is in a finalized block; the future tx must enter the pool.
+	let events = get_next_tx_events!(&mut pool_middleware, 3);
+	assert_eq!(
+		events.get(&current_xt).unwrap(),
+		&vec![
+			TxStatusTypeTest::InBlock((block_3_header.hash(), 0)),
+			TxStatusTypeTest::Finalized((block_3_header.hash(), 0))
+		]
+	);
+	// The dropped transaction was resubmitted.
+	assert_eq!(events.get(&future_xt).unwrap(), &vec![TxStatusTypeTest::Ready]);
+}
diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs
index 17889b3bad2a55794cc191514106fe02274512b6..d44006392dca4a05a6c9f5bbf872c75bc7d5ee52 100644
--- a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs
+++ b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs
@@ -22,10 +22,7 @@ use crate::{
 	transaction::{
 		api::TransactionApiServer,
 		error::Error,
-		event::{
-			TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError,
-			TransactionEvent,
-		},
+		event::{TransactionBlock, TransactionDropped, TransactionError, TransactionEvent},
 	},
 	SubscriptionTaskExecutor,
 };
@@ -113,9 +110,7 @@ where
 
 		match submit.await {
 			Ok(stream) => {
-				let mut state = TransactionState::new();
-				let stream =
-					stream.filter_map(move |event| async move { state.handle_event(event) });
+				let stream = stream.filter_map(move |event| async move { handle_event(event) });
 				pipe_from_stream(pending, stream.boxed()).await;
 			},
 			Err(err) => {
@@ -131,66 +126,34 @@
 	}
 }
 
-/// The transaction's state that needs to be preserved between
-/// multiple events generated by the transaction-pool.
-///
-/// # Note
-///
-/// In the future, the RPC server can submit only the last event when multiple
-/// identical events happen in a row.
-#[derive(Clone, Copy)]
-struct TransactionState {
-	/// True if the transaction was previously broadcasted.
-	broadcasted: bool,
-}
-
-impl TransactionState {
-	/// Construct a new [`TransactionState`].
-	pub fn new() -> Self {
-		TransactionState { broadcasted: false }
-	}
-
-	/// Handle events generated by the transaction-pool and convert them
-	/// to the new API expected state.
-	#[inline]
-	pub fn handle_event<Hash: Clone, BlockHash: Clone>(
-		&mut self,
-		event: TransactionStatus<Hash, BlockHash>,
-	) -> Option<TransactionEvent<BlockHash>> {
-		match event {
-			TransactionStatus::Ready | TransactionStatus::Future =>
-				Some(TransactionEvent::<BlockHash>::Validated),
-			TransactionStatus::Broadcast(peers) => {
-				// Set the broadcasted flag once if we submitted the transaction to
-				// at least one peer.
-				self.broadcasted = self.broadcasted || !peers.is_empty();
-
-				Some(TransactionEvent::Broadcasted(TransactionBroadcasted {
-					num_peers: peers.len(),
-				}))
-			},
-			TransactionStatus::InBlock((hash, index)) =>
-				Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock {
-					hash,
-					index,
-				}))),
-			TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)),
-			TransactionStatus::FinalityTimeout(_) =>
-				Some(TransactionEvent::Dropped(TransactionDropped {
-					broadcasted: self.broadcasted,
-					error: "Maximum number of finality watchers has been reached".into(),
-				})),
-			TransactionStatus::Finalized((hash, index)) =>
-				Some(TransactionEvent::Finalized(TransactionBlock { hash, index })),
-			TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError {
-				error: "Extrinsic was rendered invalid by another extrinsic".into(),
-			})),
-			TransactionStatus::Dropped => Some(TransactionEvent::Invalid(TransactionError {
-				error: "Extrinsic dropped from the pool due to exceeding limits".into(),
-			})),
-			TransactionStatus::Invalid => Some(TransactionEvent::Invalid(TransactionError {
-				error: "Extrinsic marked as invalid".into(),
+/// Handle events generated by the transaction-pool and convert them
+/// to the new API expected state.
+#[inline]
+pub fn handle_event<Hash: Clone, BlockHash: Clone>(
+	event: TransactionStatus<Hash, BlockHash>,
+) -> Option<TransactionEvent<BlockHash>> {
+	match event {
+		TransactionStatus::Ready | TransactionStatus::Future =>
+			Some(TransactionEvent::<BlockHash>::Validated),
+		TransactionStatus::InBlock((hash, index)) =>
+			Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { hash, index }))),
+		TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)),
+		TransactionStatus::FinalityTimeout(_) =>
+			Some(TransactionEvent::Dropped(TransactionDropped {
+				error: "Maximum number of finality watchers has been reached".into(),
 			})),
-			TransactionStatus::Finalized((hash, index)) =>
-				Some(TransactionEvent::Finalized(TransactionBlock { hash, index })),
-			TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError {
-				error: "Extrinsic was rendered invalid by another extrinsic".into(),
-			})),
-			TransactionStatus::Dropped => Some(TransactionEvent::Invalid(TransactionError {
-				error: "Extrinsic dropped from the pool due to exceeding limits".into(),
-			})),
-			TransactionStatus::Invalid => Some(TransactionEvent::Invalid(TransactionError {
-				error: "Extrinsic marked as invalid".into(),
-			})),
-		}
+		TransactionStatus::Finalized((hash, index)) =>
+			Some(TransactionEvent::Finalized(TransactionBlock { hash, index })),
+		TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError {
+			error: "Extrinsic was rendered invalid by another extrinsic".into(),
+		})),
+		TransactionStatus::Dropped => Some(TransactionEvent::Invalid(TransactionError {
+			error: "Extrinsic dropped from the pool due to exceeding limits".into(),
+		})),
+		TransactionStatus::Invalid => Some(TransactionEvent::Invalid(TransactionError {
+			error: "Extrinsic marked as invalid".into(),
+		})),
+		// These are the events that are not supported by the new API.
+ TransactionStatus::Broadcast(_) => None, } } diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index aa97eb87bb59d9fede43034410a99b9a1da3d52b..f65e6c9a59ec1c6c593d00c7639ca4bef26e4319 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -21,7 +21,7 @@ futures = "0.3.21" jsonrpsee = { version = "0.22", features = ["server"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } sc-block-builder = { path = "../block-builder" } sc-chain-spec = { path = "../chain-spec" } sc-client-api = { path = "../api" } diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index 96f4c1be960faa5f15508f19a9dd5665d8b684c1..dd866e671c5095dca9b5851111340a4e32f71e67 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -475,7 +475,7 @@ async fn should_return_runtime_version() { // it is basically json-encoded substrate_test_runtime_client::runtime::VERSION let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ - \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ + \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",5],\ [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index a6fa360b74f87498578a44dc9e29167afa3f6051..bbf67d1fbd0af6dceada3b73178004558044d0b2 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -29,7 +29,7 @@ runtime-benchmarks = [ [dependencies] jsonrpsee = { version = "0.22", features = ["server"] } -thiserror = "1.0.48" +thiserror = { workspace = true } futures = "0.3.21" rand = "0.8.5" parking_lot = "0.12.1" @@ -37,8 +37,8 @@ log = { workspace = true, default-features = true } futures-timer = "3.0.1" exit-future = "0.2.0" pin-project = "1.0.12" -serde = "1.0.196" -serde_json = "1.0.113" +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sc-keystore = { path = "../keystore" } sp-runtime = { path = "../../primitives/runtime" } sp-trie = { path = "../../primitives/trie" } @@ -84,6 +84,7 @@ tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "tim tempfile = "3.1.0" directories = "5.0.1" static_init = "1.0.3" +schnellru = "0.2.1" [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index aa9c1b80a29a95bd77efbda35620c132b624bd9b..35e8b53a09cf2d802a0c4a873f7ecb8099764e83 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -19,8 +19,8 @@ //! 
Substrate Client use super::block_rules::{BlockRules, LookupResult as BlockLookupResult}; -use futures::{FutureExt, StreamExt}; -use log::{error, info, trace, warn}; +use crate::client::notification_pinning::NotificationPinningWorker; +use log::{debug, info, trace, warn}; use parking_lot::{Mutex, RwLock}; use prometheus_endpoint::Registry; use rand::Rng; @@ -38,7 +38,7 @@ use sc_client_api::{ execution_extensions::ExecutionExtensions, notifications::{StorageEventStream, StorageNotifications}, CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter, - ProofProvider, UsageProvider, + ProofProvider, UnpinWorkerMessage, UsageProvider, }; use sc_consensus::{ BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, @@ -114,7 +114,7 @@ where block_rules: BlockRules, config: ClientConfig, telemetry: Option, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, _phantom: PhantomData, } @@ -326,19 +326,35 @@ where // dropped, the block will be unpinned automatically. if let Some(ref notification) = finality_notification { if let Err(err) = self.backend.pin_block(notification.hash) { - error!( + debug!( "Unable to pin block for finality notification. hash: {}, Error: {}", notification.hash, err ); - }; + } else { + let _ = self + .unpin_worker_sender + .unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash)) + .map_err(|e| { + log::error!( + "Unable to send AnnouncePin worker message for finality: {e}" + ) + }); + } } if let Some(ref notification) = import_notification { if let Err(err) = self.backend.pin_block(notification.hash) { - error!( + debug!( "Unable to pin block for import notification. hash: {}, Error: {}", notification.hash, err ); + } else { + let _ = self + .unpin_worker_sender + .unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash)) + .map_err(|e| { + log::error!("Unable to send AnnouncePin worker message for import: {e}") + }); }; } @@ -416,25 +432,12 @@ where backend.commit_operation(op)?; } - let (unpin_worker_sender, mut rx) = - tracing_unbounded::("unpin-worker-channel", 10_000); - let task_backend = Arc::downgrade(&backend); - spawn_handle.spawn( - "unpin-worker", - None, - async move { - while let Some(message) = rx.next().await { - if let Some(backend) = task_backend.upgrade() { - backend.unpin_block(message); - } else { - log::debug!("Terminating unpin-worker, backend reference was dropped."); - return - } - } - log::debug!("Terminating unpin-worker, stream terminated.") - } - .boxed(), + let (unpin_worker_sender, rx) = tracing_unbounded::>( + "notification-pinning-worker-channel", + 10_000, ); + let unpin_worker = NotificationPinningWorker::new(rx, backend.clone()); + spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run())); Ok(Client { backend, @@ -675,8 +678,10 @@ where // This is use by fast sync for runtime version to be resolvable from // changes. - let state_version = - resolve_state_version_from_wasm(&storage, &self.executor)?; + let state_version = resolve_state_version_from_wasm::<_, HashingFor>( + &storage, + &self.executor, + )?; let state_root = operation.op.reset_storage(storage, state_version)?; if state_root != *import_headers.post().state_root() { // State root mismatch when importing state. 
This should not happen in diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index a13fd4317e1553d379ea068c516e74072d3c8c95..0703cc2b47d144d4e67418cfb9966cd1cd209392 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -47,6 +47,7 @@ mod block_rules; mod call_executor; mod client; +mod notification_pinning; mod wasm_override; mod wasm_substitutes; diff --git a/substrate/client/service/src/client/notification_pinning.rs b/substrate/client/service/src/client/notification_pinning.rs new file mode 100644 index 0000000000000000000000000000000000000000..80de91c02f1ae0273d42edc79d442aa312d09356 --- /dev/null +++ b/substrate/client/service/src/client/notification_pinning.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Notification pinning related logic. +//! +//! This file contains a worker that should be started when a new client instance is created. +//! The goal is to avoid pruning of blocks that have active notifications in the node. Every +//! recipient of notifications should receive the chance to act upon them. In addition, notification +//! listeners can hold onto a [`sc_client_api::UnpinHandle`] to keep a block pinned. Once the handle +//! is dropped, a message is sent and the worker unpins the respective block. +use std::{ + marker::PhantomData, + sync::{Arc, Weak}, +}; + +use futures::StreamExt; +use sc_client_api::{Backend, UnpinWorkerMessage}; + +use sc_utils::mpsc::TracingUnboundedReceiver; +use schnellru::Limiter; +use sp_runtime::traits::Block as BlockT; + +const LOG_TARGET: &str = "db::notification_pinning"; +const NOTIFICATION_PINNING_LIMIT: usize = 1024; + +/// A limiter which automatically unpins blocks that leave the data structure. +#[derive(Clone, Debug)] +struct UnpinningByLengthLimiter> { + max_length: usize, + backend: Weak, + _phantom: PhantomData, +} + +impl> UnpinningByLengthLimiter { + /// Creates a new length limiter with a given `max_length`. + pub fn new(max_length: usize, backend: Weak) -> UnpinningByLengthLimiter { + UnpinningByLengthLimiter { max_length, backend, _phantom: PhantomData::::default() } + } +} + +impl> Limiter + for UnpinningByLengthLimiter +{ + type KeyToInsert<'a> = Block::Hash; + type LinkType = usize; + + fn is_over_the_limit(&self, length: usize) -> bool { + length > self.max_length + } + + fn on_insert( + &mut self, + _length: usize, + key: Self::KeyToInsert<'_>, + value: u32, + ) -> Option<(Block::Hash, u32)> { + log::debug!(target: LOG_TARGET, "Pinning block based on notification. 
hash = {key}"); + if self.max_length > 0 { + Some((key, value)) + } else { + None + } + } + + fn on_replace( + &mut self, + _length: usize, + _old_key: &mut Block::Hash, + _new_key: Block::Hash, + _old_value: &mut u32, + _new_value: &mut u32, + ) -> bool { + true + } + + fn on_removed(&mut self, key: &mut Block::Hash, references: &mut u32) { + // If reference count was larger than 0 on removal, + // the item was removed due to capacity limitations. + // Since the cache should be large enough for pinned items, + // we want to know about these evictions. + if *references > 0 { + log::warn!( + target: LOG_TARGET, + "Notification block pinning limit reached. Unpinning block with hash = {key:?}" + ); + if let Some(backend) = self.backend.upgrade() { + (0..*references).for_each(|_| backend.unpin_block(*key)); + } + } else { + log::trace!( + target: LOG_TARGET, + "Unpinned block. hash = {key:?}", + ) + } + } + + fn on_cleared(&mut self) {} + + fn on_grow(&mut self, _new_memory_usage: usize) -> bool { + true + } +} + +/// Worker for the handling of notification pinning. +/// +/// It receives messages from a receiver and pins/unpins based on the incoming messages. +/// All notification related unpinning should go through this worker. If the maximum number of +/// notification pins is reached, the block from the oldest notification is unpinned. +pub struct NotificationPinningWorker> { + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Weak, + pinned_blocks: schnellru::LruMap>, +} + +impl> NotificationPinningWorker { + /// Creates a new `NotificationPinningWorker`. + pub fn new( + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Arc, + ) -> Self { + let pinned_blocks = + schnellru::LruMap::>::new( + UnpinningByLengthLimiter::new( + NOTIFICATION_PINNING_LIMIT, + Arc::downgrade(&task_backend), + ), + ); + Self { unpin_message_rx, task_backend: Arc::downgrade(&task_backend), pinned_blocks } + } + + fn handle_announce_message(&mut self, hash: Block::Hash) { + if let Some(entry) = self.pinned_blocks.get_or_insert(hash, Default::default) { + *entry = *entry + 1; + } + } + + fn handle_unpin_message(&mut self, hash: Block::Hash) -> Result<(), ()> { + if let Some(refcount) = self.pinned_blocks.peek_mut(&hash) { + *refcount = *refcount - 1; + if *refcount == 0 { + self.pinned_blocks.remove(&hash); + } + if let Some(backend) = self.task_backend.upgrade() { + log::debug!(target: LOG_TARGET, "Reducing pinning refcount for block hash = {hash:?}"); + backend.unpin_block(hash); + } else { + log::debug!(target: LOG_TARGET, "Terminating unpin-worker, backend reference was dropped."); + return Err(()) + } + } else { + log::debug!(target: LOG_TARGET, "Received unpin message for already unpinned block. hash = {hash:?}"); + } + Ok(()) + } + + /// Start working on the received messages. + /// + /// The worker maintains a map which keeps track of the pinned blocks and their reference count. + /// Depending upon the received message, it acts to pin/unpin the block. 
+	pub async fn run(mut self) {
+		while let Some(message) = self.unpin_message_rx.next().await {
+			match message {
+				UnpinWorkerMessage::AnnouncePin(hash) => self.handle_announce_message(hash),
+				UnpinWorkerMessage::Unpin(hash) =>
+					if self.handle_unpin_message(hash).is_err() {
+						return
+					},
+			}
+		}
+		log::debug!(target: LOG_TARGET, "Terminating unpin-worker, stream terminated.")
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use std::sync::Arc;
+
+	use sc_client_api::{Backend, UnpinWorkerMessage};
+	use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
+	use sp_core::H256;
+	use sp_runtime::traits::Block as BlockT;
+
+	type Block = substrate_test_runtime_client::runtime::Block;
+
+	use super::{NotificationPinningWorker, UnpinningByLengthLimiter};
+
+	impl<Block: BlockT, B: Backend<Block>> NotificationPinningWorker<Block, B> {
+		fn new_with_limit(
+			unpin_message_rx: TracingUnboundedReceiver<UnpinWorkerMessage<Block>>,
+			task_backend: Arc<B>,
+			limit: usize,
+		) -> Self {
+			let pinned_blocks =
+				schnellru::LruMap::<Block::Hash, u32, UnpinningByLengthLimiter<Block, B>>::new(
+					UnpinningByLengthLimiter::new(limit, Arc::downgrade(&task_backend)),
+				);
+			Self { unpin_message_rx, task_backend: Arc::downgrade(&task_backend), pinned_blocks }
+		}
+
+		fn lru(
+			&self,
+		) -> &schnellru::LruMap<Block::Hash, u32, UnpinningByLengthLimiter<Block, B>> {
+			&self.pinned_blocks
+		}
+	}
+
+	#[test]
+	fn pinning_worker_handles_base_case() {
+		let (_tx, rx) = tracing_unbounded("testing", 1000);
+
+		let backend = Arc::new(sc_client_api::in_mem::Backend::<Block>::new());
+
+		let hash = H256::random();
+
+		let mut worker = NotificationPinningWorker::new(rx, backend.clone());
+
+		// Block got pinned and unpin message should unpin in the backend.
+		let _ = backend.pin_block(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(1));
+
+		worker.handle_announce_message(hash);
+		assert_eq!(worker.lru().len(), 1);
+
+		let _ = worker.handle_unpin_message(hash);
+
+		assert_eq!(backend.pin_refs(&hash), Some(0));
+		assert!(worker.lru().is_empty());
+	}
+
+	#[test]
+	fn pinning_worker_handles_multiple_pins() {
+		let (_tx, rx) = tracing_unbounded("testing", 1000);
+
+		let backend = Arc::new(sc_client_api::in_mem::Backend::<Block>::new());
+
+		let hash = H256::random();
+
+		let mut worker = NotificationPinningWorker::new(rx, backend.clone());
+		// Block got pinned multiple times.
+		let _ = backend.pin_block(hash);
+		let _ = backend.pin_block(hash);
+		let _ = backend.pin_block(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(3));
+
+		worker.handle_announce_message(hash);
+		worker.handle_announce_message(hash);
+		worker.handle_announce_message(hash);
+		assert_eq!(worker.lru().len(), 1);
+
+		let _ = worker.handle_unpin_message(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(2));
+		let _ = worker.handle_unpin_message(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(1));
+		let _ = worker.handle_unpin_message(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(0));
+		assert!(worker.lru().is_empty());
+
+		let _ = worker.handle_unpin_message(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(0));
+	}
+
+	#[test]
+	fn pinning_worker_handles_too_many_unpins() {
+		let (_tx, rx) = tracing_unbounded("testing", 1000);
+
+		let backend = Arc::new(sc_client_api::in_mem::Backend::<Block>::new());
+
+		let hash = H256::random();
+		let hash2 = H256::random();
+
+		let mut worker = NotificationPinningWorker::new(rx, backend.clone());
+		// Block was announced once but unpinned multiple times. The worker should ignore the
+		// additional unpins.
+		let _ = backend.pin_block(hash);
+		let _ = backend.pin_block(hash);
+		let _ = backend.pin_block(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(3));
+
+		worker.handle_announce_message(hash);
+		assert_eq!(worker.lru().len(), 1);
+
+		let _ = worker.handle_unpin_message(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(2));
+		let _ = worker.handle_unpin_message(hash);
+		assert_eq!(backend.pin_refs(&hash), Some(2));
+		assert!(worker.lru().is_empty());
+
+		let _ = worker.handle_unpin_message(hash2);
+		assert!(worker.lru().is_empty());
+		assert_eq!(backend.pin_refs(&hash2), None);
+	}
+
+	#[test]
+	fn pinning_worker_should_evict_when_limit_reached() {
+		let (_tx, rx) = tracing_unbounded("testing", 1000);
+
+		let backend = Arc::new(sc_client_api::in_mem::Backend::<Block>::new());
+
+		let hash1 = H256::random();
+		let hash2 = H256::random();
+		let hash3 = H256::random();
+		let hash4 = H256::random();
+
+		// Only two items fit into the cache.
+		let mut worker = NotificationPinningWorker::new_with_limit(rx, backend.clone(), 2);
+
+		// Multiple blocks are announced but the cache size is too small. We expect that blocks
+		// are evicted by the cache and unpinned in the backend.
+		let _ = backend.pin_block(hash1);
+		let _ = backend.pin_block(hash2);
+		let _ = backend.pin_block(hash3);
+		assert_eq!(backend.pin_refs(&hash1), Some(1));
+		assert_eq!(backend.pin_refs(&hash2), Some(1));
+		assert_eq!(backend.pin_refs(&hash3), Some(1));
+
+		worker.handle_announce_message(hash1);
+		assert!(worker.lru().peek(&hash1).is_some());
+		worker.handle_announce_message(hash2);
+		assert!(worker.lru().peek(&hash2).is_some());
+		worker.handle_announce_message(hash3);
+		assert!(worker.lru().peek(&hash3).is_some());
+		assert!(worker.lru().peek(&hash2).is_some());
+		assert_eq!(worker.lru().len(), 2);
+
+		// Hash 1 should have gotten unpinned, since it is the oldest.
+		assert_eq!(backend.pin_refs(&hash1), Some(0));
+		assert_eq!(backend.pin_refs(&hash2), Some(1));
+		assert_eq!(backend.pin_refs(&hash3), Some(1));
+
+		// Hash 2 is getting bumped.
+		worker.handle_announce_message(hash2);
+		assert_eq!(worker.lru().peek(&hash2), Some(&2));
+
+		// Since hash 2 was accessed, evict hash 3.
+		worker.handle_announce_message(hash4);
+		assert_eq!(worker.lru().peek(&hash3), None);
+	}
+}
diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs
index 74c5dd775b0eeca74e4cba35f5816448436cd68a..35262ff493b44f07a94f1d35f1a3e765251a1897 100644
--- a/substrate/client/service/src/config.rs
+++ b/substrate/client/service/src/config.rs
@@ -18,6 +18,7 @@
 
 //! Service configuration.
 
+pub use jsonrpsee::server::BatchRequestConfig as RpcBatchRequestConfig;
 use prometheus_endpoint::Registry;
 use sc_chain_spec::ChainSpec;
 pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode};
@@ -103,6 +104,8 @@ pub struct Configuration {
 	pub rpc_port: u16,
 	/// The number of messages the JSON-RPC server is allowed to keep in memory.
 	pub rpc_message_buffer_capacity: u32,
+	/// JSON-RPC server batch config.
+	pub rpc_batch_config: RpcBatchRequestConfig,
 	/// RPC rate limit per minute.
 	pub rpc_rate_limit: Option<NonZeroU32>,
 	/// Prometheus endpoint configuration. `None` if disabled.
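The `rpc_batch_config` field added above re-exports jsonrpsee's `BatchRequestConfig`. A sketch of how a node might choose a policy, assuming the jsonrpsee 0.22 variants `Unlimited`, `Disabled`, and `Limit(u32)` (the function name is illustrative):

```rust
use sc_service::config::RpcBatchRequestConfig;

/// Illustrative policy: disable batch requests entirely on public-facing
/// nodes, otherwise cap a single batch at 64 calls.
fn batch_config(public_node: bool) -> RpcBatchRequestConfig {
    if public_node {
        RpcBatchRequestConfig::Disabled
    } else {
        RpcBatchRequestConfig::Limit(64)
    }
}
```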
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index d0adf4f7d5c561707d061855732c7262b838a8fc..9480d4a0b07276a5131be17579964c04abda1f1d 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -393,6 +393,7 @@ where let server_config = sc_rpc_server::Config { addrs: [addr, backup_addr], + batch_config: config.rpc_batch_config, max_connections: config.rpc_max_connections, max_payload_in_mb: config.rpc_max_request_size, max_payload_out_mb: config.rpc_max_response_size, diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 6148bb05fcfbf99323c8b92d2c369c201094e373..349538965ee1fa04bd0acfff42b517ae40452c1c 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -29,7 +29,7 @@ use sc_network::{ use sc_network_sync::SyncingService; use sc_service::{ client::Client, - config::{BasePath, DatabaseSource, KeystoreConfig}, + config::{BasePath, DatabaseSource, KeystoreConfig, RpcBatchRequestConfig}, BlocksPruning, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, RuntimeGenesis, SpawnTaskHandle, TaskManager, }; @@ -254,6 +254,7 @@ fn node_config< rpc_max_subs_per_conn: Default::default(), rpc_port: 9944, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index b0f0464414216136ce58c14f758cba11f2f6fd1b..b2120b3efc4396801bdbcaf0a7867ab6692908d3 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -17,4 +17,4 @@ log = { workspace = true, default-features = true } fs4 = "0.7.0" sp-core = { path = "../../primitives/core" } tokio = { version = "1.22.0", features = ["time"] } -thiserror = "1.0.48" +thiserror = { workspace = true } diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index fc0efd6630ca87324695d535a3d6522040ae1444..09dc611caa044debeabce4f15686465535f076ed 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } -serde = { version = "1.0.196", features = ["derive"] } -serde_json = "1.0.113" -thiserror = "1.0.48" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } sc-chain-spec = { path = "../chain-spec" } sc-client-api = { path = "../api" } sc-consensus-babe = { path = "../consensus/babe" } diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index ede0bccaedc8a74bd06a64b87d621dd89751595a..ba58452ffb58bfd737235beb285e6e46c992967d 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -24,8 +24,8 @@ rand = "0.8.5" rand_pcg = "0.3.1" derive_more = "0.99" regex = "1" -serde = { version = "1.0.196", features = ["derive"] } -serde_json = "1.0.113" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sc-telemetry = { path = "../telemetry" } sp-core = { path 
= "../../primitives/core" } sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index e35fbc16142e9271d3c6bb6a57311aa1401f3800..8ab00202f0ba6828dbf210e44d6337393ba1d391 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -25,7 +25,7 @@ parking_lot = "0.12.1" pin-project = "1.0.12" sc-utils = { path = "../utils" } rand = "0.8.5" -serde = { version = "1.0.196", features = ["derive"] } -serde_json = "1.0.113" -thiserror = "1.0.48" +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } wasm-timer = "0.2.5" diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index bd5a661bb3a38bb0313972430ddd307392df7d82..61e6f7d0bab5b2422b3354edb10e1753d3ef1e7a 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -26,8 +26,8 @@ log = { workspace = true, default-features = true } parking_lot = "0.12.1" regex = "1.6.0" rustc-hash = "1.1.0" -serde = "1.0.196" -thiserror = "1.0.48" +serde = { workspace = true, default-features = true } +thiserror = { workspace = true } tracing = "0.1.29" tracing-log = "0.1.3" tracing-subscriber = { version = "0.2.25", features = ["parking_lot"] } diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index aa6ae7d6fd4ac5dda2ebd055e029e27fadd3d17f..fec34aa0bca935e22f0f7d7faea17cac5d8f10bb 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -20,5 +20,5 @@ proc-macro = true [dependencies] proc-macro-crate = "3.0.0" proc-macro2 = "1.0.56" -quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.49", features = ["extra-traits", "full", "parsing", "proc-macro"] } +quote = { features = ["proc-macro"], workspace = true } +syn = { features = ["extra-traits", "full", "parsing", "proc-macro"], workspace = true } diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index c7a2540f8ed4b441cd969a8ca7f9dd27a55a6df6..2ca37afd61b84a7228852290d5075841c993a503 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -23,8 +23,8 @@ futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = { workspace = true, default-features = true } parking_lot = "0.12.1" -serde = { version = "1.0.196", features = ["derive"] } -thiserror = "1.0.48" +serde = { features = ["derive"], workspace = true, default-features = true } +thiserror = { workspace = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-client-api = { path = "../api" } sc-transaction-pool-api = { path = "api" } diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index 79449b2ee87cd097489aa9571abef75acde84076..d52e4783fabce48992dec01e83c61bc7a3074a20 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -16,11 +16,11 @@ async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" log = { workspace = true, default-features = true } -serde = { version = "1.0.196", features = ["derive"] } -thiserror = "1.0.48" +serde = { features = ["derive"], 
workspace = true, default-features = true } +thiserror = { workspace = true } sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core", default-features = false } sp-runtime = { path = "../../../primitives/runtime", default-features = false } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index faa3f455a580c8713ced21b14818da874fd972a7..64b301e6bf36f4f4fc88abf15a36c5b1ef30b04b 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -164,8 +164,9 @@ where pool_api: Arc, best_block_hash: Block::Hash, finalized_hash: Block::Hash, + options: graph::Options, ) -> (Self, Pin + Send>>) { - let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); + let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone())); let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( pool_api.clone(), pool.clone(), diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 6b1a197440c11e69805465ed4c7195f07eaafc0f..461b9860d414a3495dc34bf4f149963fa4d0a903 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -73,7 +73,7 @@ fn create_basic_pool_with_genesis( .map(|blocks| blocks[0].0.header.hash()) .expect("there is block 0. qed") }; - BasicPool::new_test(test_api, genesis_hash, genesis_hash) + BasicPool::new_test(test_api, genesis_hash, genesis_hash, Default::default()) } fn create_basic_pool(test_api: TestApi) -> BasicPool { @@ -994,6 +994,7 @@ fn import_notification_to_pool_maintain_works() { )), best_hash, finalized_hash, + Default::default(), ) .0, ); diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 3f148bf4c83bfcbcf1892bdb0ebb83abb70b9cce..6746723e72f79498cf764199866936b93373858e 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -19,8 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # external deps -parity-scale-codec = { version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.2.2", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.6.0", default-features = false, features = [ + "derive", +] } # primitive deps, used for developing FRAME pallets. 
sp-runtime = { default-features = false, path = "../primitives/runtime" } @@ -57,8 +61,6 @@ pallet-examples = { path = "./examples" } default = ["runtime", "std"] experimental = ["frame-support/experimental"] runtime = [ - "frame-executive", - "frame-system-rpc-runtime-api", "sp-api", "sp-block-builder", "sp-consensus-aura", @@ -68,6 +70,9 @@ runtime = [ "sp-session", "sp-transaction-pool", "sp-version", + + "frame-executive", + "frame-system-rpc-runtime-api", ] std = [ "frame-executive?/std", diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index 9fe0e29b42cd9526a1e221531dd5946846b17b90..4ccd0fc08f6ac1c8ac7e6009b555a470eb4a2779 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -26,7 +26,7 @@ use core::{ }; use sp_runtime::traits::{Bounded, Hash, StaticLookup}; -use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; +use frame_benchmarking::{account, v2::*, BenchmarkError}; use frame_support::traits::{EnsureOrigin, Get, UnfilteredDispatchable}; use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin as SystemOrigin}; diff --git a/substrate/frame/alliance/src/migration.rs b/substrate/frame/alliance/src/migration.rs index e3a44a7887e976cfa6c5d0cfd48c251348edb26b..432f09a16f47772ead741a0f312eefeb795e2d69 100644 --- a/substrate/frame/alliance/src/migration.rs +++ b/substrate/frame/alliance/src/migration.rs @@ -19,19 +19,19 @@ use crate::{Config, Pallet, Weight, LOG_TARGET}; use frame_support::{pallet_prelude::*, storage::migration, traits::OnRuntimeUpgrade}; use log; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); /// Wrapper for all migrations of this pallet. 
pub fn migrate, I: 'static>() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); let mut weight: Weight = Weight::zero(); - if onchain_version < 1 { + if on_chain_version < 1 { weight = weight.saturating_add(v0_to_v1::migrate::()); } - if onchain_version < 2 { + if on_chain_version < 2 { weight = weight.saturating_add(v1_to_v2::migrate::()); } diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index 2f0130f0e4d1a3ddea05606ca14525e64626cc70..2610f675f8c3bb2ef4e7015960e78e5ad24f3518 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -214,7 +214,7 @@ impl ProposalProvider for AllianceProposalProvider } fn proposal_of(proposal_hash: H256) -> Option { - AllianceMotion::proposal_of(proposal_hash) + pallet_collective::ProposalOf::::get(proposal_hash) } } diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index 8011627b237af15e8d30379bf14afc2564421050..710de5a54bc95a607b51f4078db94d7c24c92272 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -187,8 +187,8 @@ fn propose_works() { Box::new(proposal.clone()), proposal_len )); - assert_eq!(*AllianceMotion::proposals(), vec![hash]); - assert_eq!(AllianceMotion::proposal_of(&hash), Some(proposal)); + assert_eq!(*pallet_collective::Proposals::::get(), vec![hash]); + assert_eq!(pallet_collective::ProposalOf::::get(&hash), Some(proposal)); assert_eq!( System::events(), vec![EventRecord { diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs index f0695678fbddf07a4943c4ad0982ac95c4634866..c9725f9d39d68f8fd8fbb8257a802975f9b9589b 100644 --- a/substrate/frame/asset-conversion/src/lib.rs +++ b/substrate/frame/asset-conversion/src/lib.rs @@ -40,7 +40,7 @@ //! non-native asset 1, you would pass in a path of `[DOT, 1]` or `[1, DOT]`. If you want to swap //! from non-native asset 1 to non-native asset 2, you would pass in a path of `[1, DOT, 2]`. //! -//! (For an example of configuring this pallet to use `MultiLocation` as an asset id, see the +//! (For an example of configuring this pallet to use `Location` as an asset id, see the //! cumulus repo). //! //! Here is an example `state_call` that asks for a quote of a pool of native versus asset 1: @@ -413,6 +413,11 @@ pub mod pallet { /// Params `amount1_min`/`amount2_min` represent that. /// `mint_to` will be sent the liquidity tokens that represent this share of the pool. /// + /// NOTE: when encountering an incorrect exchange rate and non-withdrawable pool liquidity, + /// batch an atomic call with [`Pallet::add_liquidity`] and + /// [`Pallet::swap_exact_tokens_for_tokens`] or [`Pallet::swap_tokens_for_exact_tokens`] + /// calls to render the liquidity withdrawable and rectify the exchange rate. + /// /// Once liquidity is added, someone may successfully call /// [`Pallet::swap_exact_tokens_for_tokens`] successfully. 
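The NOTE added above prescribes an atomic batch of `add_liquidity` plus one of the swap calls to repair a pool whose on-chain rate has drifted. A minimal sketch of such a batch, assuming a runtime that composes this pallet with `pallet-utility`; `RuntimeCall`, `RuntimeOrigin`, `Utility`, the `NativeOrWithId` asset kind, the account `alice`, and all amounts are illustrative assumptions, not part of this diff:

```rust
use frame_support::assert_ok;

// Hypothetical helper inside a runtime test; check the call fields against
// the pallet version actually deployed.
fn rectify_pool(alice: u64) {
	let calls = vec![
		// Step 1: deposit liquidity. Done alone, a bad rate could make this
		// liquidity non-withdrawable, which is what the NOTE warns about.
		RuntimeCall::AssetConversion(pallet_asset_conversion::Call::add_liquidity {
			asset1: Box::new(NativeOrWithId::Native),
			asset2: Box::new(NativeOrWithId::WithId(1)),
			amount1_desired: 1_000,
			amount2_desired: 1_000,
			amount1_min: 1,
			amount2_min: 1,
			mint_to: alice,
		}),
		// Step 2: swap within the same batch to push the rate back to target.
		RuntimeCall::AssetConversion(pallet_asset_conversion::Call::swap_exact_tokens_for_tokens {
			path: vec![Box::new(NativeOrWithId::Native), Box::new(NativeOrWithId::WithId(1))],
			amount_in: 500,
			amount_out_min: 1,
			send_to: alice,
			keep_alive: true,
		}),
	];
	// `batch_all` is all-or-nothing: if the swap fails, the liquidity deposit
	// is rolled back as well, which is what makes the fix atomic.
	assert_ok!(Utility::batch_all(RuntimeOrigin::signed(alice), calls));
}
```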
#[pallet::call_index(1)] diff --git a/substrate/frame/asset-rate/src/lib.rs b/substrate/frame/asset-rate/src/lib.rs index d4afca8b73c4b2978bbed954c05aee4e2a645ee5..befabfe54aa0e766d498eccf2201de4e9c0f2fc9 100644 --- a/substrate/frame/asset-rate/src/lib.rs +++ b/substrate/frame/asset-rate/src/lib.rs @@ -59,8 +59,14 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::traits::{fungible::Inspect, tokens::ConversionFromAssetBalance}; -use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128}; +use frame_support::traits::{ + fungible::Inspect, + tokens::{ConversionFromAssetBalance, ConversionToAssetBalance}, +}; +use sp_runtime::{ + traits::{CheckedDiv, Zero}, + FixedPointNumber, FixedU128, +}; use sp_std::boxed::Box; pub use pallet::*; @@ -144,6 +150,8 @@ pub mod pallet { UnknownAssetKind, /// The given asset ID already has an assigned conversion rate and cannot be re-created. AlreadyExists, + /// Overflow occurred when calculating the inverse rate. + Overflow, } #[pallet::call] @@ -246,3 +254,25 @@ where pallet::ConversionRateToNative::<T>::set(asset_id.clone(), Some(1.into())); } } + +/// Exposes conversion of a native balance to an asset balance. +impl<T> ConversionToAssetBalance<BalanceOf<T>, AssetKindOf<T>, BalanceOf<T>> for Pallet<T> +where + T: Config, +{ + type Error = pallet::Error<T>; + + fn to_asset_balance( + balance: BalanceOf<T>, + asset_kind: AssetKindOf<T>, + ) -> Result<BalanceOf<T>, pallet::Error<T>> { + let rate = pallet::ConversionRateToNative::<T>::get(asset_kind) + .ok_or(pallet::Error::<T>::UnknownAssetKind.into())?; + + // We cannot use `saturating_div` here so we use `checked_div`. + Ok(FixedU128::from_u32(1) + .checked_div(&rate) + .ok_or(pallet::Error::<T>::Overflow.into())? + .saturating_mul_int(balance)) + } +} diff --git a/substrate/frame/asset-rate/src/tests.rs b/substrate/frame/asset-rate/src/tests.rs index 452265476093b034256907e9d2a232c91041f772..fc7bf2f150311b638ba3a7bc939d6b62ba927cc0 100644 --- a/substrate/frame/asset-rate/src/tests.rs +++ b/substrate/frame/asset-rate/src/tests.rs @@ -131,12 +131,19 @@ fn convert_works() { FixedU128::from_float(2.51) )); - let conversion = <AssetRate as ConversionFromAssetBalance< + let conversion_from_asset = <AssetRate as ConversionFromAssetBalance< BalanceOf<Test>, <Test as pallet::Config>::AssetKind, BalanceOf<Test>, >>::from_asset_balance(10, ASSET_ID); - assert_eq!(conversion.expect("Conversion rate exists for asset"), 25); + assert_eq!(conversion_from_asset.expect("Conversion rate exists for asset"), 25); + + let conversion_to_asset = <AssetRate as ConversionToAssetBalance< + BalanceOf<Test>, + <Test as pallet::Config>::AssetKind, + BalanceOf<Test>, + >>::to_asset_balance(25, ASSET_ID); + assert_eq!(conversion_to_asset.expect("Conversion rate exists for asset"), 9); }); } @@ -151,3 +158,21 @@ fn convert_unknown_throws() { assert!(conversion.is_err()); }); } + +#[test] +fn convert_overflow_throws() { + new_test_ext().execute_with(|| { + assert_ok!(AssetRate::create( + RuntimeOrigin::root(), + Box::new(ASSET_ID), + FixedU128::from_u32(0) + )); + + let conversion = <AssetRate as ConversionToAssetBalance< + BalanceOf<Test>, + <Test as pallet::Config>::AssetKind, + BalanceOf<Test>, + >>::to_asset_balance(10, ASSET_ID); + assert!(conversion.is_err()); + }); +} diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index cafe7bb1a3b53df4b48d2a1b9a45cdf14c18cea8..09d59ae1b8b5320649dd2ba83f139701aba3d800 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -208,7 +208,7 @@ pub mod pallet { }; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] @@ -226,10 +226,42 @@ pub mod pallet { } } - #[pallet::config] + /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`].
+ pub mod config_preludes { + use super::*; + use frame_support::{derive_impl, traits::ConstU64}; + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeEvent = (); + type Balance = u64; + type RemoveItemsLimit = ConstU32<5>; + type AssetId = u32; + type AssetIdParameter = u32; + type AssetDeposit = ConstU64<1>; + type AssetAccountDeposit = ConstU64<10>; + type MetadataDepositBase = ConstU64<1>; + type MetadataDepositPerByte = ConstU64<1>; + type ApprovalDeposit = ConstU64<1>; + type StringLimit = ConstU32<50>; + type Extra = (); + type CallbackHandle = (); + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); + } + } + + #[pallet::config(with_default)] /// The module configuration trait. pub trait Config: frame_system::Config { /// The overarching event type. + #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -262,10 +294,12 @@ pub mod pallet { type AssetIdParameter: Parameter + From + Into + MaxEncodedLen; /// The currency mechanism. + #[pallet::no_default] type Currency: ReservableCurrency; /// Standard asset class creation is only allowed if the origin attempting it and the /// asset class are in this set. + #[pallet::no_default] type CreateOrigin: EnsureOriginWithArg< Self::RuntimeOrigin, Self::AssetId, @@ -274,28 +308,34 @@ pub mod pallet { /// The origin which may forcibly create or destroy an asset or otherwise alter privileged /// attributes. + #[pallet::no_default] type ForceOrigin: EnsureOrigin; /// The basic amount of funds that must be reserved for an asset. #[pallet::constant] + #[pallet::no_default_bounds] type AssetDeposit: Get>; /// The amount of funds that must be reserved for a non-provider asset account to be /// maintained. #[pallet::constant] + #[pallet::no_default_bounds] type AssetAccountDeposit: Get>; /// The basic amount of funds that must be reserved when adding metadata to your asset. #[pallet::constant] + #[pallet::no_default_bounds] type MetadataDepositBase: Get>; /// The additional funds that must be reserved for the number of bytes you store in your /// metadata. #[pallet::constant] + #[pallet::no_default_bounds] type MetadataDepositPerByte: Get>; /// The amount of funds that must be reserved when creating a new approval. #[pallet::constant] + #[pallet::no_default_bounds] type ApprovalDeposit: Get>; /// The maximum length of a name or symbol stored on-chain. @@ -304,6 +344,7 @@ pub mod pallet { /// A hook to allow a per-asset, per-account minimum balance to be enforced. This must be /// respected in all permissionless operations. + #[pallet::no_default] type Freezer: FrozenBalance; /// Additional data to be stored with an account's asset balance. 
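For orientation on the `with_default` machinery introduced above: items tagged `#[pallet::no_default]` (`Currency`, `CreateOrigin`, `ForceOrigin`, `Freezer`) are excluded from the generated `DefaultConfig` trait and must always be spelled out, while `#[pallet::no_default_bounds]` items keep a default but drop their usual trait bounds so the prelude can satisfy them with plain placeholder types. A mock runtime then pulls everything else from `TestDefaultConfig` via `derive_impl`, as the `mock.rs` hunk below does; a condensed sketch, where `Test` and `Balances` are assumed mock-runtime types:

```rust
use frame_support::{derive_impl, traits::AsEnsureOriginWithArg};
use frame_system::{EnsureRoot, EnsureSigned};

// Every associated type not listed here falls back to the pallet's
// `config_preludes::TestDefaultConfig` registered above.
#[derive_impl(pallet_assets::config_preludes::TestDefaultConfig)]
impl pallet_assets::Config for Test {
	// Only the `#[pallet::no_default]` items still need to be provided:
	type Currency = Balances;
	type CreateOrigin = AsEnsureOriginWithArg<EnsureSigned<u64>>;
	type ForceOrigin = EnsureRoot<u64>;
	// `()` is the no-op `FrozenBalance` implementation shipped with the pallet.
	type Freezer = ();
}
```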
diff --git a/substrate/frame/assets/src/migration.rs b/substrate/frame/assets/src/migration.rs index ff0ffbff0d362fbd630e22dcb293f28e2b75469c..dd7c12293e80f410301bb47cca013ae4f013e72f 100644 --- a/substrate/frame/assets/src/migration.rs +++ b/substrate/frame/assets/src/migration.rs @@ -67,9 +67,9 @@ pub mod v1 { pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 && current_version == 1 { + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 0 && in_code_version == 1 { let mut translated = 0u64; Asset::::translate::< OldAssetDetails>, @@ -78,12 +78,12 @@ pub mod v1 { translated.saturating_inc(); Some(old_value.migrate_to_v1()) }); - current_version.put::>(); + in_code_version.put::>(); log::info!( target: LOG_TARGET, "Upgraded {} pools, storage to version {:?}", translated, - current_version + in_code_version ); T::DbWeight::get().reads_writes(translated + 1, translated + 1) } else { @@ -116,13 +116,13 @@ pub mod v1 { "the asset count before and after the migration should be the same" ); - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - frame_support::ensure!(current_version == 1, "must_upgrade"); + frame_support::ensure!(in_code_version == 1, "must_upgrade"); ensure!( - current_version == onchain_version, - "after migration, the current_version and onchain_version should be the same" + in_code_version == on_chain_version, + "after migration, the in_code_version and on_chain_version should be the same" ); Asset::::iter().try_for_each(|(_id, asset)| -> Result<(), TryRuntimeError> { diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index e1722200c35d4c3674db8a56dbee092f1f7ff8a8..906febb0214fcd9cb125ddd5d307413b9abbbca9 100644 --- a/substrate/frame/assets/src/mock.rs +++ b/substrate/frame/assets/src/mock.rs @@ -108,27 +108,13 @@ impl AssetsCallbackHandle { } } +#[derive_impl(crate::config_preludes::TestDefaultConfig)] impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type Balance = u64; - type AssetId = u32; - type AssetIdParameter = u32; type Currency = Balances; type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = frame_system::EnsureRoot; - type AssetDeposit = ConstU64<1>; - type AssetAccountDeposit = ConstU64<10>; - type MetadataDepositBase = ConstU64<1>; - type MetadataDepositPerByte = ConstU64<1>; - type ApprovalDeposit = ConstU64<1>; - type StringLimit = ConstU32<50>; type Freezer = TestFreezer; - type WeightInfo = (); type CallbackHandle = AssetsCallbackHandle; - type Extra = (); - type RemoveItemsLimit = ConstU32<5>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); } use std::collections::HashMap; diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index a6e44390dbc534e15e3a1658513c6ce9fa25088e..5fb107dde3bad06e6f4812d27ff1159a95194442 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -323,7 +323,7 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - pub 
epoch_config: Option, + pub epoch_config: BabeEpochConfiguration, #[serde(skip)] pub _config: sp_std::marker::PhantomData, } @@ -333,9 +333,7 @@ pub mod pallet { fn build(&self) { SegmentIndex::::put(0); Pallet::::initialize_genesis_authorities(&self.authorities); - EpochConfig::::put( - self.epoch_config.clone().expect("epoch_config must not be None"), - ); + EpochConfig::::put(&self.epoch_config); } } diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 7dd087eabd6343df653905856c078d3f6f2a3908..e87aa6f9311e30311066e21e73e8e006787fed90 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -320,7 +320,7 @@ pub mod pallet { type MaxFreezes: Get; } - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: frame_support::traits::StorageVersion = frame_support::traits::StorageVersion::new(1); diff --git a/substrate/frame/balances/src/migration.rs b/substrate/frame/balances/src/migration.rs index ba6819ec6e814da69807c8717a088df2451b6cb5..38d9c07ff7e002f6a7a57189b7ebce6830b52c75 100644 --- a/substrate/frame/balances/src/migration.rs +++ b/substrate/frame/balances/src/migration.rs @@ -22,9 +22,9 @@ use frame_support::{ }; fn migrate_v0_to_v1, I: 'static>(accounts: &[T::AccountId]) -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 { + if on_chain_version == 0 { let total = accounts .iter() .map(|a| Pallet::::total_balance(a)) @@ -76,9 +76,9 @@ impl, A: Get>, I: 'static> OnRuntimeUpgrade pub struct ResetInactive(PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for ResetInactive { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 1 { + if on_chain_version == 1 { // Remove the old `StorageVersion` type. 
frame_support::storage::unhashed::kill(&frame_support::storage::storage_prefix( Pallet::::name().as_bytes(), diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index 25290890fcddacc1918dab913e64a795fefeb0f3..17707731773118b7028f3cff8031982c0c5d704b 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -16,7 +16,7 @@ array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 866b032c33136dc84811d55ca505c79770f8f5d1..e38eaa6fb0787aca48b07fad57e0ea0adffbe7e4 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -15,7 +15,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-authorship = { path = "../authorship", default-features = false } diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index 8be7a500d3fef28e8757ebe92233bddfb2eb26ae..bf42aae979cd0edd3a00ca98f8aacaba55d807dc 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -21,7 +21,7 @@ linregress = { version = "0.5.1", optional = true } log = { workspace = true } paste = "1.0" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-support-procedural = { path = "../support/procedural", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/broker/src/adapt_price.rs b/substrate/frame/broker/src/adapt_price.rs index 8266625687a23073a335bcab58db6ef7c33c8d80..fbcd7afdf0da059fb7023de3422f15b2344d5407 100644 --- a/substrate/frame/broker/src/adapt_price.rs +++ b/substrate/frame/broker/src/adapt_price.rs @@ -19,6 +19,7 @@ use crate::CoreIndex; use sp_arithmetic::{traits::One, FixedU64}; +use sp_runtime::Saturating; /// Type for determining how to set price. pub trait AdaptPrice { @@ -49,14 +50,24 @@ impl AdaptPrice for () { pub struct Linear; impl AdaptPrice for Linear { fn leadin_factor_at(when: FixedU64) -> FixedU64 { - FixedU64::from(2) - when + FixedU64::from(2).saturating_sub(when) } fn adapt_price(sold: CoreIndex, target: CoreIndex, limit: CoreIndex) -> FixedU64 { if sold <= target { - FixedU64::from_rational(sold.into(), target.into()) + // Range of [0.5, 1.0]. 
+ FixedU64::from_rational(1, 2).saturating_add(FixedU64::from_rational( + sold.into(), + target.saturating_mul(2).into(), + )) } else { - FixedU64::one() + - FixedU64::from_rational((sold - target).into(), (limit - target).into()) + // Range of (1.0, 2]. + + // Unchecked math: In this branch we know that sold > target. The limit must be >= sold + // by construction, and thus target must be < limit. + FixedU64::one().saturating_add(FixedU64::from_rational( + (sold - target).into(), + (limit - target).into(), + )) } } } @@ -81,4 +92,23 @@ mod tests { } } } + + #[test] + fn linear_bound_check() { + // Using constraints from pallet implementation i.e. `limit >= sold`. + // Check extremes + let limit = 10; + let target = 5; + + // Maximally sold: `sold == limit` + assert_eq!(Linear::adapt_price(limit, target, limit), FixedU64::from_float(2.0)); + // Ideally sold: `sold == target` + assert_eq!(Linear::adapt_price(target, target, limit), FixedU64::one()); + // Minimally sold: `sold == 0` + assert_eq!(Linear::adapt_price(0, target, limit), FixedU64::from_float(0.5)); + // Optimistic target: `target == limit` + assert_eq!(Linear::adapt_price(limit, limit, limit), FixedU64::one()); + // Pessimistic target: `target == 0` + assert_eq!(Linear::adapt_price(limit, 0, limit), FixedU64::from_float(2.0)); + } } diff --git a/substrate/frame/collective/src/benchmarking.rs b/substrate/frame/collective/src/benchmarking.rs index 390bddc2c0a9f81c87b342b9ed86a1ddaddafe80..d91512cd95a343c0b626f17d03e038204432a1e5 100644 --- a/substrate/frame/collective/src/benchmarking.rs +++ b/substrate/frame/collective/src/benchmarking.rs @@ -128,7 +128,7 @@ mod benchmarks { ); new_members.sort(); - assert_eq!(Collective::::members(), new_members); + assert_eq!(Members::::get(), new_members); Ok(()) } @@ -255,7 +255,7 @@ mod benchmarks { )?; } - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); if let Some(deposit) = T::ProposalDeposit::get_deposit(p) { T::Currency::mint_into(&caller, deposit)?; @@ -272,7 +272,7 @@ mod benchmarks { ); // New proposal is recorded - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); let proposal_hash = T::Hashing::hash_of(&proposal); assert_last_event::( Event::Proposed { account: caller, proposal_index: p - 1, proposal_hash, threshold } @@ -350,7 +350,7 @@ mod benchmarks { approve, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Voter switches vote to nay, but does not kill the vote, just updates + inserts let approve = false; @@ -363,8 +363,8 @@ mod benchmarks { _(SystemOrigin::Signed(voter), last_hash, index, approve); // All proposals exist and the last proposal has just been updated. 
- assert_eq!(Collective::::proposals().len(), p as usize); - let voting = Collective::::voting(&last_hash).ok_or("Proposal Missing")?; + assert_eq!(Proposals::::get().len(), p as usize); + let voting = Voting::::get(&last_hash).ok_or("Proposal Missing")?; assert_eq!(voting.ayes.len(), (m - 3) as usize); assert_eq!(voting.nays.len(), 1); Ok(()) @@ -441,7 +441,7 @@ mod benchmarks { approve, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Voter switches vote to nay, which kills the vote let approve = false; @@ -460,7 +460,7 @@ mod benchmarks { close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage); // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); Ok(()) } @@ -542,7 +542,7 @@ mod benchmarks { true, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Caller switches vote to aye, which passes the vote let index = p - 1; @@ -558,7 +558,7 @@ mod benchmarks { close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage); // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::( Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into(), ); @@ -647,13 +647,13 @@ mod benchmarks { )?; System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Prime nay will close it as disapproved #[extrinsic_call] close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage); - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); Ok(()) } @@ -729,13 +729,13 @@ mod benchmarks { // caller is prime, prime already votes aye by creating the proposal System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Prime aye will close it as approved #[extrinsic_call] close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::MAX, bytes_in_storage); - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::( Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into(), ); @@ -788,7 +788,7 @@ mod benchmarks { } System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); let origin = T::DisapproveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -796,7 +796,7 @@ mod benchmarks { #[extrinsic_call] _(origin as ::RuntimeOrigin, last_hash); - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); Ok(()) } @@ -853,7 +853,7 @@ mod benchmarks { } System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + 
assert_eq!(Proposals::::get().len(), p as usize); if d == 0 { DepositOf::::remove(last_hash); @@ -865,7 +865,7 @@ mod benchmarks { #[extrinsic_call] _(origin as ::RuntimeOrigin, last_hash); - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Killed { proposal_hash: last_hash }.into()); if d != 0 { if let Some(deposit) = T::ProposalDeposit::get_deposit(p - 1) { @@ -930,15 +930,15 @@ mod benchmarks { } System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); if d == 0 { DepositOf::::remove(last_hash); } - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); let _ = Collective::::remove_proposal(last_hash); - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); #[extrinsic_call] _(SystemOrigin::Signed(caller.clone()), last_hash); diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index 65f000a351b68f3212890f6cb4c93817d2f0be10..5f8c456b547a52d0339db305c6c9cc72f81a6b03 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -341,7 +341,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] @@ -450,13 +450,11 @@ pub mod pallet { /// The hashes of the active proposals. #[pallet::storage] - #[pallet::getter(fn proposals)] pub type Proposals, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// Actual proposal for a given hash, if it's current. #[pallet::storage] - #[pallet::getter(fn proposal_of)] pub type ProposalOf, I: 'static = ()> = StorageMap<_, Identity, T::Hash, >::Proposal, OptionQuery>; @@ -468,24 +466,20 @@ pub mod pallet { /// Votes on a given proposal, if it is ongoing. #[pallet::storage] - #[pallet::getter(fn voting)] pub type Voting, I: 'static = ()> = StorageMap<_, Identity, T::Hash, Votes>, OptionQuery>; /// Proposals so far. #[pallet::storage] - #[pallet::getter(fn proposal_count)] pub type ProposalCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; /// The current members of the collective. This is stored sorted (just by value). #[pallet::storage] - #[pallet::getter(fn members)] pub type Members, I: 'static = ()> = StorageValue<_, Vec, ValueQuery>; /// The prime member that helps determine the default vote behavior in case of absentations. 
#[pallet::storage] - #[pallet::getter(fn prime)] pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; #[pallet::event] @@ -678,7 +672,7 @@ pub mod pallet { #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); @@ -738,7 +732,7 @@ pub mod pallet { #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); if threshold < 2 { @@ -784,7 +778,7 @@ pub mod pallet { approve: bool, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); // Detects first vote of the member in the motion @@ -921,7 +915,7 @@ impl, I: 'static> Pallet { pub fn is_member(who: &T::AccountId) -> bool { // Note: The dispatchables *do not* use this to check membership so make sure // to update those if this is changed. - Self::members().contains(who) + Members::::get().contains(who) } /// Execute immediately when adding a new proposal. @@ -940,7 +934,7 @@ impl, I: 'static> Pallet { let proposal_hash = T::Hashing::hash_of(&proposal); ensure!(!>::contains_key(proposal_hash), Error::::DuplicateProposal); - let seats = Self::members().len() as MemberCount; + let seats = Members::::get().len() as MemberCount; let result = proposal.dispatch(RawOrigin::Members(1, seats).into()); Self::deposit_event(Event::Executed { proposal_hash, @@ -978,7 +972,8 @@ impl, I: 'static> Pallet { >::insert(proposal_hash, (who.clone(), deposit)); } - let index = Self::proposal_count(); + let index = ProposalCount::::get(); + >::mutate(|i| *i += 1); >::insert(proposal_hash, proposal); let votes = { @@ -1004,7 +999,7 @@ impl, I: 'static> Pallet { index: ProposalIndex, approve: bool, ) -> Result { - let mut voting = Self::voting(&proposal).ok_or(Error::::ProposalMissing)?; + let mut voting = Voting::::get(&proposal).ok_or(Error::::ProposalMissing)?; ensure!(voting.index == index, Error::::WrongIndex); let position_yes = voting.ayes.iter().position(|a| a == &who); @@ -1055,12 +1050,12 @@ impl, I: 'static> Pallet { proposal_weight_bound: Weight, length_bound: u32, ) -> DispatchResultWithPostInfo { - let voting = Self::voting(&proposal_hash).ok_or(Error::::ProposalMissing)?; + let voting = Voting::::get(&proposal_hash).ok_or(Error::::ProposalMissing)?; ensure!(voting.index == index, Error::::WrongIndex); let mut no_votes = voting.nays.len() as MemberCount; let mut yes_votes = voting.ayes.len() as MemberCount; - let seats = Self::members().len() as MemberCount; + let seats = Members::::get().len() as MemberCount; let approved = yes_votes >= voting.threshold; let disapproved = seats.saturating_sub(no_votes) < voting.threshold; // Allow (dis-)approving the proposal as soon as there are enough votes. @@ -1094,7 +1089,7 @@ impl, I: 'static> Pallet { // Only allow actual closing of the proposal after the voting period has ended. 
ensure!(frame_system::Pallet::::block_number() >= voting.end, Error::::TooEarly); - let prime_vote = Self::prime().map(|who| voting.ayes.iter().any(|a| a == &who)); + let prime_vote = Prime::::get().map(|who| voting.ayes.iter().any(|a| a == &who)); // default voting strategy. let default = T::DefaultVote::default_vote(prime_vote, yes_votes, no_votes, seats); @@ -1274,29 +1269,28 @@ impl, I: 'static> Pallet { /// * The prime account must be a member of the collective. #[cfg(any(feature = "try-runtime", test))] fn do_try_state() -> Result<(), TryRuntimeError> { - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { ensure!( - Self::proposal_of(proposal).is_some(), + ProposalOf::::get(proposal).is_some(), "Proposal hash from `Proposals` is not found inside the `ProposalOf` mapping." ); Ok(()) - })?; + }, + )?; ensure!( - Self::proposals().into_iter().count() <= Self::proposal_count() as usize, + Proposals::::get().into_iter().count() <= ProposalCount::::get() as usize, "The actual number of proposals is greater than `ProposalCount`" ); ensure!( - Self::proposals().into_iter().count() == >::iter_keys().count(), + Proposals::::get().into_iter().count() == >::iter_keys().count(), "Proposal count inside `Proposals` is not equal to the proposal count in `ProposalOf`" ); - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { - if let Some(votes) = Self::voting(proposal) { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { + if let Some(votes) = Voting::::get(proposal) { let ayes = votes.ayes.len(); let nays = votes.nays.len(); @@ -1306,13 +1300,13 @@ impl, I: 'static> Pallet { ); } Ok(()) - })?; + }, + )?; let mut proposal_indices = vec![]; - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { - if let Some(votes) = Self::voting(proposal) { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { + if let Some(votes) = Voting::::get(proposal) { let proposal_index = votes.index; ensure!( !proposal_indices.contains(&proposal_index), @@ -1321,12 +1315,13 @@ impl, I: 'static> Pallet { proposal_indices.push(proposal_index); } Ok(()) - })?; + }, + )?; >::iter_keys().try_for_each( |proposal_hash| -> Result<(), TryRuntimeError> { ensure!( - Self::proposals().contains(&proposal_hash), + Proposals::::get().contains(&proposal_hash), "`Proposals` doesn't contain the proposal hash from the `Voting` storage map." ); Ok(()) @@ -1334,17 +1329,17 @@ impl, I: 'static> Pallet { )?; ensure!( - Self::members().len() <= T::MaxMembers::get() as usize, + Members::::get().len() <= T::MaxMembers::get() as usize, "The member count is greater than `MaxMembers`." ); ensure!( - Self::members().windows(2).all(|members| members[0] <= members[1]), + Members::::get().windows(2).all(|members| members[0] <= members[1]), "The members are not sorted by value." ); - if let Some(prime) = Self::prime() { - ensure!(Self::members().contains(&prime), "Prime account is not a member."); + if let Some(prime) = Prime::::get() { + ensure!(Members::::get().contains(&prime), "Prime account is not a member."); } Ok(()) @@ -1378,7 +1373,7 @@ impl, I: 'static> ChangeMembers for Pallet { // remove accounts from all current voting in motions. 
let mut outgoing = outgoing.to_vec(); outgoing.sort(); - for h in Self::proposals().into_iter() { + for h in Proposals::::get().into_iter() { >::mutate(h, |v| { if let Some(mut votes) = v.take() { votes.ayes = votes diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index 5bdaa90415a6beb7ac9ff9aa85bafd2e632f4595..f66e5ae8915a4a8cf4b5b0356557f9b042935d06 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -241,8 +241,8 @@ fn default_max_proposal_weight() -> Weight { #[test] fn motions_basic_environment_works() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Collective::members(), vec![1, 2, 3]); - assert_eq!(*Collective::proposals(), Vec::::new()); + assert_eq!(Members::::get(), vec![1, 2, 3]); + assert_eq!(*Proposals::::get(), Vec::::new()); }); } @@ -253,7 +253,7 @@ fn initialize_members_sorts_members() { ExtBuilder::default() .set_collective_members(unsorted_members) .build_and_execute(|| { - assert_eq!(Collective::members(), expected_members); + assert_eq!(Members::::get(), expected_members); }); } @@ -267,8 +267,8 @@ fn set_members_with_prime_works() { Some(3), MaxMembers::get() )); - assert_eq!(Collective::members(), members.clone()); - assert_eq!(Collective::prime(), Some(3)); + assert_eq!(Members::::get(), members.clone()); + assert_eq!(Prime::::get(), Some(3)); assert_noop!( Collective::set_members(RuntimeOrigin::root(), members, Some(4), MaxMembers::get()), Error::::PrimeAccountNotMember @@ -702,12 +702,12 @@ fn removal_of_old_voters_votes_works() { assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) ); @@ -723,12 +723,12 @@ fn removal_of_old_voters_votes_works() { assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); Collective::change_members_sorted(&[], &[3], &[2, 4]); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) ); }); @@ -750,7 +750,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); assert_ok!(Collective::set_members( @@ -760,7 +760,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { MaxMembers::get() )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) ); @@ -776,7 +776,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), 
Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); assert_ok!(Collective::set_members( @@ -786,7 +786,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { MaxMembers::get() )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) ); }); @@ -805,10 +805,10 @@ fn propose_works() { Box::new(proposal.clone()), proposal_len )); - assert_eq!(*Collective::proposals(), vec![hash]); - assert_eq!(Collective::proposal_of(&hash), Some(proposal)); + assert_eq!(*Proposals::::get(), vec![hash]); + assert_eq!(ProposalOf::::get(&hash), Some(proposal)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![], nays: vec![], end }) ); @@ -973,13 +973,13 @@ fn motions_vote_after_works() { )); // Initially there a no votes when the motion is proposed. assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); // Cast first aye vote. assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) ); // Try to cast a duplicate aye vote. @@ -990,7 +990,7 @@ fn motions_vote_after_works() { // Cast a nay vote. assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end }) ); // Try to cast a duplicate nay vote. @@ -1041,7 +1041,7 @@ fn motions_all_first_vote_free_works() { proposal_len, )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); @@ -1106,14 +1106,14 @@ fn motions_reproposing_disapproved_works() { proposal_weight, proposal_len )); - assert_eq!(*Collective::proposals(), vec![]); + assert_eq!(*Proposals::::get(), vec![]); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_eq!(*Collective::proposals(), vec![hash]); + assert_eq!(*Proposals::::get(), vec![hash]); }); } diff --git a/substrate/frame/contracts/CHANGELOG.md b/substrate/frame/contracts/CHANGELOG.md deleted file mode 100644 index aca94e5b149152d5c598102c633048baa17d1276..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/CHANGELOG.md +++ /dev/null @@ -1,116 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -The semantic versioning guarantees cover the interface to the Substrate runtime which -includes this pallet as a dependency. This module will also add storage migrations whenever -changes require it. Stability with regard to offchain tooling is explicitly excluded from -this guarantee: For example adding a new field to an in-storage data structure will require -changes to frontends to properly display it. However, those changes will still be regarded -as a minor version bump. - -The interface provided to smart contracts will adhere to semver with one exception: Even -major version bumps will be backwards compatible with regard to already deployed contracts. 
-In other words: Upgrading this pallet will not break pre-existing contracts. - -## [Unreleased] - -### Added - -- Forbid calling back to contracts after switching to runtime -[#13443](https://github.com/paritytech/substrate/pull/13443) - -- Allow contracts to dispatch calls into the runtime (**unstable**) -[#9276](https://github.com/paritytech/substrate/pull/9276) - -- New version of `seal_call` that offers more features. -[#8909](https://github.com/paritytech/substrate/pull/8909) - -- New `instantiate` RPC that allows clients to dry-run contract instantiation. -[#8451](https://github.com/paritytech/substrate/pull/8451) - -- New version of `seal_random` which exposes additional information. -[#8329](https://github.com/paritytech/substrate/pull/8329) - -### Changed - -- Replaced storage rent with automatic storage deposits -[#9669](https://github.com/paritytech/substrate/pull/9669) -[#10082](https://github.com/paritytech/substrate/pull/10082) - -- Replaced `seal_println` with the `seal_debug_message` API which allows outputting debug -messages to the console and RPC clients. -[#8773](https://github.com/paritytech/substrate/pull/8773) -[#9550](https://github.com/paritytech/substrate/pull/9550) - -- Make storage and fields of `Schedule` private to the crate. -[#8359](https://github.com/paritytech/substrate/pull/8359) - -### Fixed - -- Remove pre-charging which caused wrongly estimated weights -[#8976](https://github.com/paritytech/substrate/pull/8976) - -## [v3.0.0] 2021-02-25 - -This version constitutes the first release that brings any stability guarantees (see above). - -### Added - -- Emit an event when a contract terminates (self-destructs). -[#8014](https://github.com/paritytech/substrate/pull/8014) - -- Charge rent for code stored on the chain in addition to the already existing -rent that is paid for data storage. -[#7935](https://github.com/paritytech/substrate/pull/7935) - -- Allow the runtime to configure per storage item costs in addition -to the already existing per byte costs. -[#7819](https://github.com/paritytech/substrate/pull/7819) - -- Contracts are now deleted lazily so that the user who removes a contract -does not need to pay for the deletion of the contract storage. -[#7740](https://github.com/paritytech/substrate/pull/7740) - -- Allow runtime authors to define chain extensions in order to provide custom -functionality to contracts. -[#7548](https://github.com/paritytech/substrate/pull/7548) -[#8003](https://github.com/paritytech/substrate/pull/8003) - -- Proper weights which are fully automated by benchmarking. -[#6715](https://github.com/paritytech/substrate/pull/6715) -[#7017](https://github.com/paritytech/substrate/pull/7017) -[#7361](https://github.com/paritytech/substrate/pull/7361) - -### Changed - -- Collect the rent for one block during instantiation. -[#7847](https://github.com/paritytech/substrate/pull/7847) - -- Instantiation takes a `salt` argument to allow for easier instantion of the -same code by the same sender. -[#7482](https://github.com/paritytech/substrate/pull/7482) - -- Improve the information returned by the `contracts_call` RPC. -[#7468](https://github.com/paritytech/substrate/pull/7468) - -- Simplify the node configuration necessary to add this module. -[#7409](https://github.com/paritytech/substrate/pull/7409) - -### Fixed - -- Consider the code size of a contract in the weight that is charged for -loading a contract from storage. 
-[#8086](https://github.com/paritytech/substrate/pull/8086) - -- Fix possible overflow in storage size calculation -[#7885](https://github.com/paritytech/substrate/pull/7885) - -- Cap the surcharge reward that can be claimed. -[#7870](https://github.com/paritytech/substrate/pull/7870) - -- Fix a possible DoS vector where contracts could allocate too large buffers. -[#7818](https://github.com/paritytech/substrate/pull/7818) diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 0b127a5a454800bac4303240438115baaba4da85..be3bafcd23fa107b12005ebe45da874e2a55e16b 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -25,7 +25,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { workspace = true } -serde = { version = "1", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index 7fdf56a91fcc7c114b0cadd622e9d43e47088f33..9ca9e404c31000569b50ce641619121f68927e34 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -11,7 +11,6 @@ description = "Fixtures for testing contracts pallet." workspace = true [dependencies] -wat = "1" frame-system = { path = "../../system" } sp-runtime = { path = "../../../primitives/runtime" } anyhow = "1.0.0" diff --git a/substrate/frame/contracts/fixtures/contracts/call.rs b/substrate/frame/contracts/fixtures/contracts/call.rs index f7d9d862d7465937f71afff998aecdaa9aa1f4bd..535745ffc0bc505c5cc29dc5959fe31a3e1fe509 100644 --- a/substrate/frame/contracts/fixtures/contracts/call.rs +++ b/substrate/frame/contracts/fixtures/contracts/call.rs @@ -35,11 +35,13 @@ pub extern "C" fn call() { ); // Call the callee - api::call_v1( + api::call_v2( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &0u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. callee_input, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/call_return_code.rs b/substrate/frame/contracts/fixtures/contracts/call_return_code.rs index 1256588d3b58554edf5b1d00e98572681087f2c7..3d5a1073eaecaedabcf774e336abea48ab0b9001 100644 --- a/substrate/frame/contracts/fixtures/contracts/call_return_code.rs +++ b/substrate/frame/contracts/fixtures/contracts/call_return_code.rs @@ -38,11 +38,13 @@ pub extern "C" fn call() { ); // Call the callee - let err_code = match api::call_v1( + let err_code = match api::call_v2( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &100u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &100u64.to_le_bytes(), // Value transferred to the contract. 
input, None, ) { diff --git a/substrate/frame/contracts/fixtures/contracts/call_runtime_and_call.rs b/substrate/frame/contracts/fixtures/contracts/call_runtime_and_call.rs index fdeb6097e732f9e7cc717f1144a51953c138e7f7..1321d36dc7efa17151cff67cfcd6162c91eecaa6 100644 --- a/substrate/frame/contracts/fixtures/contracts/call_runtime_and_call.rs +++ b/substrate/frame/contracts/fixtures/contracts/call_runtime_and_call.rs @@ -39,11 +39,13 @@ pub extern "C" fn call() { api::call_runtime(call).unwrap(); // Call the callee - api::call_v1( + api::call_v2( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &0u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. callee_input, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/call_with_limit.rs b/substrate/frame/contracts/fixtures/contracts/call_with_limit.rs index 40cde234b44ad22a2cd2c8830d04ade2c9ff3603..f0851f32c75bc13d7b89c1bf366595ef1dbd57ca 100644 --- a/substrate/frame/contracts/fixtures/contracts/call_with_limit.rs +++ b/substrate/frame/contracts/fixtures/contracts/call_with_limit.rs @@ -38,7 +38,6 @@ pub extern "C" fn call() { forwarded_input: [u8], ); - #[allow(deprecated)] api::call_v2( uapi::CallFlags::empty(), callee_addr, diff --git a/substrate/frame/contracts/fixtures/contracts/caller_contract.rs b/substrate/frame/contracts/fixtures/contracts/caller_contract.rs index c2629e9fa1971fea3da5a77f3c2d3bdfce3ce601..fffdb66a33d7979da3d81f53ec51e4308f5d355b 100644 --- a/substrate/frame/contracts/fixtures/contracts/caller_contract.rs +++ b/substrate/frame/contracts/fixtures/contracts/caller_contract.rs @@ -40,7 +40,6 @@ pub extern "C" fn call() { let reverted_input = [1u8, 34, 51, 68, 85, 102, 119]; // Fail to deploy the contract since it returns a non-zero exit status. - #[allow(deprecated)] let res = api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. @@ -55,7 +54,6 @@ pub extern "C" fn call() { assert!(matches!(res, Err(ReturnErrorCode::CalleeReverted))); // Fail to deploy the contract due to insufficient ref_time weight. - #[allow(deprecated)] let res = api::instantiate_v2( code_hash, 1u64, // too little ref_time weight 0u64, // How much proof_size weight to devote for the execution. 0 = all. @@ -65,7 +63,6 @@ pub extern "C" fn call() { assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); // Fail to deploy the contract due to insufficient proof_size weight. - #[allow(deprecated)] let res = api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. 1u64, // Too little proof_size weight @@ -78,7 +75,6 @@ pub extern "C" fn call() { let mut callee = [0u8; 32]; let callee = &mut &mut callee[..]; - #[allow(deprecated)] api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. @@ -94,7 +90,6 @@ pub extern "C" fn call() { assert_eq!(callee.len(), 32); // Call the new contract and expect it to return failing exit code. - #[allow(deprecated)] let res = api::call_v2( uapi::CallFlags::empty(), callee, @@ -108,11 +103,10 @@ pub extern "C" fn call() { assert!(matches!(res, Err(ReturnErrorCode::CalleeReverted))); // Fail to call the contract due to insufficient ref_time weight. 
- #[allow(deprecated)] let res = api::call_v2( uapi::CallFlags::empty(), callee, - 1u64, // too little ref_time weight + 1u64, // Too little ref_time weight. 0u64, // How much proof_size weight to devote for the execution. 0 = all. None, // No deposit limit. &value, @@ -122,7 +116,6 @@ pub extern "C" fn call() { assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); // Fail to call the contract due to insufficient proof_size weight. - #[allow(deprecated)] let res = api::call_v2( uapi::CallFlags::empty(), callee, @@ -137,7 +130,6 @@ pub extern "C" fn call() { // Call the contract successfully. let mut output = [0u8; 4]; - #[allow(deprecated)] api::call_v2( uapi::CallFlags::empty(), callee, diff --git a/substrate/frame/contracts/fixtures/contracts/caller_is_origin_n.rs b/substrate/frame/contracts/fixtures/contracts/caller_is_origin_n.rs new file mode 100644 index 0000000000000000000000000000000000000000..fd6f59802fa08408105070f37ae64569a32d38a7 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/caller_is_origin_n.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This fixture calls caller_is_origin `n` times. + +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(n: u32, ); + + for _ in 0..n { + let _ = api::caller_is_origin(); + } +} diff --git a/substrate/frame/contracts/fixtures/contracts/chain_extension_temp_storage.rs b/substrate/frame/contracts/fixtures/contracts/chain_extension_temp_storage.rs index 1ab08efb3c7ea49ee1e016cb6136b428e86075a3..2e15fb02a12ca5f0cd4bee944af062961d66e8b2 100644 --- a/substrate/frame/contracts/fixtures/contracts/chain_extension_temp_storage.rs +++ b/substrate/frame/contracts/fixtures/contracts/chain_extension_temp_storage.rs @@ -50,11 +50,13 @@ pub extern "C" fn call() { output!(addr, [0u8; 32], api::address,); // call self - api::call_v1( + api::call_v2( uapi::CallFlags::ALLOW_REENTRY, addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &0u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. 
input, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/create_storage_and_call.rs b/substrate/frame/contracts/fixtures/contracts/create_storage_and_call.rs index 8b79dd87dffd2f35c2c597f95f4e442a855f69ea..f8ce0ff4fb84831fbf258f489d0bc6935784ff43 100644 --- a/substrate/frame/contracts/fixtures/contracts/create_storage_and_call.rs +++ b/substrate/frame/contracts/fixtures/contracts/create_storage_and_call.rs @@ -40,14 +40,13 @@ pub extern "C" fn call() { api::set_storage(buffer, &[1u8; 4]); // Call the callee - #[allow(deprecated)] api::call_v2( uapi::CallFlags::empty(), callee, 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. Some(deposit_limit), - &0u64.to_le_bytes(), // value transferred to the contract. + &0u64.to_le_bytes(), // Value transferred to the contract. input, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/create_storage_and_instantiate.rs b/substrate/frame/contracts/fixtures/contracts/create_storage_and_instantiate.rs index c68d99eff821e86ecfa399fc6804f1e3a7b2649c..fa3b9000a7ed6bd3f5f094d39b5d025a995a8b4c 100644 --- a/substrate/frame/contracts/fixtures/contracts/create_storage_and_instantiate.rs +++ b/substrate/frame/contracts/fixtures/contracts/create_storage_and_instantiate.rs @@ -40,7 +40,6 @@ pub extern "C" fn call() { let mut address = [0u8; 32]; let address = &mut &mut address[..]; - #[allow(deprecated)] api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. diff --git a/substrate/frame/contracts/fixtures/contracts/destroy_and_transfer.rs b/substrate/frame/contracts/fixtures/contracts/destroy_and_transfer.rs index cfcfd60f21eef1c543e249832f74bd75e5c4a8d3..62fb63b56020455ea775e802b95daeacaefd3658 100644 --- a/substrate/frame/contracts/fixtures/contracts/destroy_and_transfer.rs +++ b/substrate/frame/contracts/fixtures/contracts/destroy_and_transfer.rs @@ -34,7 +34,6 @@ pub extern "C" fn deploy() { let address = &mut &mut address[..]; let salt = [71u8, 17u8]; - #[allow(deprecated)] api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. @@ -60,7 +59,6 @@ pub extern "C" fn call() { api::get_storage(&ADDRESS_KEY, callee_addr).unwrap(); // Calling the destination contract with non-empty input data should fail. - #[allow(deprecated)] let res = api::call_v2( uapi::CallFlags::empty(), callee_addr, @@ -74,7 +72,6 @@ pub extern "C" fn call() { assert!(matches!(res, Err(uapi::ReturnErrorCode::CalleeTrapped))); // Call the destination contract regularly, forcing it to self-destruct. - #[allow(deprecated)] api::call_v2( uapi::CallFlags::empty(), callee_addr, diff --git a/substrate/frame/contracts/fixtures/contracts/instantiate_return_code.rs b/substrate/frame/contracts/fixtures/contracts/instantiate_return_code.rs index 67cd739d85a6942a046cfaf7cc45301d6570600f..de194abe4840ac5fc67b83c1a377d114e65dfc01 100644 --- a/substrate/frame/contracts/fixtures/contracts/instantiate_return_code.rs +++ b/substrate/frame/contracts/fixtures/contracts/instantiate_return_code.rs @@ -31,7 +31,6 @@ pub extern "C" fn call() { input!(buffer, 36, code_hash: [u8; 32],); let input = &buffer[32..]; - #[allow(deprecated)] let err_code = match api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. 
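All of the fixture hunks above apply the same mechanical migration: `call_v1`'s single `gas` argument becomes an explicit `ref_time`/`proof_size` pair in `call_v2`, with a new optional storage deposit limit in between. A condensed before/after sketch of just the host call, with argument values taken from the fixtures above (fixture boilerplate omitted):

```rust
// Before: one opaque gas limit covering all resources.
api::call_v1(
	uapi::CallFlags::empty(),
	callee_addr,
	0u64,                // How much gas to devote for the execution. 0 = all.
	&0u64.to_le_bytes(), // Value transferred to the contract.
	callee_input,
	None,
)
.unwrap();

// After: weight is two-dimensional and the storage deposit limit is explicit.
api::call_v2(
	uapi::CallFlags::empty(),
	callee_addr,
	0u64,                // How much ref_time to devote for the execution. 0 = all.
	0u64,                // How much proof_size to devote for the execution. 0 = all.
	None,                // No deposit limit.
	&0u64.to_le_bytes(), // Value transferred to the contract.
	callee_input,
	None,                // Where to copy the output; None discards it.
)
.unwrap();
```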
diff --git a/substrate/frame/contracts/fixtures/contracts/add_remove_delegate_dependency.rs b/substrate/frame/contracts/fixtures/contracts/locking_delegate_dependency.rs similarity index 83% rename from substrate/frame/contracts/fixtures/contracts/add_remove_delegate_dependency.rs rename to substrate/frame/contracts/fixtures/contracts/locking_delegate_dependency.rs index 759ff7993740479f9e89b3c047eca27de74dbcc4..bb76c942aad18c511ab2e88bf1597101b307c2b4 100644 --- a/substrate/frame/contracts/fixtures/contracts/add_remove_delegate_dependency.rs +++ b/substrate/frame/contracts/fixtures/contracts/locking_delegate_dependency.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! This contract tests the behavior of adding / removing delegate_dependencies when delegate +//! This contract tests the behavior of locking / unlocking delegate_dependencies when delegate //! calling into a contract. #![no_std] #![no_main] @@ -34,15 +34,13 @@ fn load_input(delegate_call: bool) { ); match action { - // 1 = Add delegate dependency + // 1 = Lock delegate dependency 1 => { - #[allow(deprecated)] - api::add_delegate_dependency(code_hash); + api::lock_delegate_dependency(code_hash); }, - // 2 = Remove delegate dependency + // 2 = Unlock delegate dependency 2 => { - #[allow(deprecated)] - api::remove_delegate_dependency(code_hash); + api::unlock_delegate_dependency(code_hash); }, // 3 = Terminate 3 => { diff --git a/substrate/frame/contracts/fixtures/contracts/recurse.rs b/substrate/frame/contracts/fixtures/contracts/recurse.rs new file mode 100644 index 0000000000000000000000000000000000000000..b1ded608c2fc417323580257b74835a0aaee9aa3 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/recurse.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This fixture calls itself as many times as the passed argument specifies. + +#![no_std] +#![no_main] + +use common::{input, output}; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(calls_left: u32, ); + + // own address + output!(addr, [0u8; 32], api::address,); + + if calls_left == 0 { + return + } + + api::call_v2( + uapi::CallFlags::ALLOW_REENTRY, + addr, + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract.
+ &(calls_left - 1).to_le_bytes(), + None, + ) + .unwrap(); +} diff --git a/substrate/frame/contracts/fixtures/contracts/reentrance_count_call.rs b/substrate/frame/contracts/fixtures/contracts/reentrance_count_call.rs index 9812dce2e818d29a352927d2b47e301b06683dae..0acfe017f8df9094ef14aa9e6947b56e183d25be 100644 --- a/substrate/frame/contracts/fixtures/contracts/reentrance_count_call.rs +++ b/substrate/frame/contracts/fixtures/contracts/reentrance_count_call.rs @@ -42,11 +42,13 @@ pub extern "C" fn call() { if expected_reentrance_count != 5 { let count = (expected_reentrance_count + 1).to_le_bytes(); - api::call_v1( + api::call_v2( uapi::CallFlags::ALLOW_REENTRY, addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &0u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. &count, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/self_destruct.rs b/substrate/frame/contracts/fixtures/contracts/self_destruct.rs index 94c3ac387a0a65228674efa93a477543b807661c..d3836d2d8c734214a67333dba9293067b4b5d9b1 100644 --- a/substrate/frame/contracts/fixtures/contracts/self_destruct.rs +++ b/substrate/frame/contracts/fixtures/contracts/self_destruct.rs @@ -37,10 +37,12 @@ pub extern "C" fn call() { if !input.is_empty() { output!(addr, [0u8; 32], api::address,); - api::call_v1( + api::call_v2( uapi::CallFlags::ALLOW_REENTRY, addr, - 0u64, // How much gas to devote for the execution. 0 = all. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. &0u64.to_le_bytes(), // Value to transfer. 
&[0u8; 0], None, diff --git a/substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat b/substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat deleted file mode 100644 index 0aeefbcb7ebfe27f55fb84de0f14a3c7ec12b05c..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat +++ /dev/null @@ -1,5 +0,0 @@ -;; A valid contract which does nothing at all -(module - (func (export "deploy")) - (func (export "call")) -) diff --git a/substrate/frame/contracts/fixtures/data/invalid_module.wat b/substrate/frame/contracts/fixtures/data/invalid_module.wat deleted file mode 100644 index e4a72f74273f9350420f737e0974516b3e5c4132..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/fixtures/data/invalid_module.wat +++ /dev/null @@ -1,8 +0,0 @@ -;; An invalid module -(module - (func (export "deploy")) - (func (export "call") - ;; imbalanced stack - (i32.const 7) - ) -) diff --git a/substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat b/substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat deleted file mode 100644 index 6591d7ede78c20e0d19c7c60c183b4c58e98d2dc..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat +++ /dev/null @@ -1,10 +0,0 @@ -(module - (import "env" "memory" (memory 1 1)) - (start $start) - (func $start - (loop $inf (br $inf)) ;; just run out of gas - (unreachable) - ) - (func (export "call")) - (func (export "deploy")) -) diff --git a/substrate/frame/contracts/fixtures/data/seal_input_noop.wat b/substrate/frame/contracts/fixtures/data/seal_input_noop.wat deleted file mode 100644 index 7b5a1e32af4d61ba8dfecdc978e5f4d3a09e1480..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/fixtures/data/seal_input_noop.wat +++ /dev/null @@ -1,14 +0,0 @@ -;; Everything prepared for the host function call, but no call is performed. -(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; [0, 8) buffer to write input - - ;; [8, 12) size of the input buffer - (data (i32.const 8) "\04") - - (func (export "call")) - - (func (export "deploy")) -) diff --git a/substrate/frame/contracts/fixtures/data/seal_input_once.wat b/substrate/frame/contracts/fixtures/data/seal_input_once.wat deleted file mode 100644 index 919a03a9b6903df3ff4917347b9c40a2d260b8f1..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/fixtures/data/seal_input_once.wat +++ /dev/null @@ -1,22 +0,0 @@ -;; Stores a value of the passed size. The host function is called once. -(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; [0, 8) buffer to write input - - ;; [8, 12) size of the input buffer - (data (i32.const 8) "\04") - - (func (export "call") - ;; instructions to consume engine fuel - (drop - (i32.const 42) - ) - - (call $seal_input (i32.const 0) (i32.const 8)) - - ) - - (func (export "deploy")) -) diff --git a/substrate/frame/contracts/fixtures/data/seal_input_twice.wat b/substrate/frame/contracts/fixtures/data/seal_input_twice.wat deleted file mode 100644 index 3a8be814efb045078494294961686931c4cd7ab4..0000000000000000000000000000000000000000 --- a/substrate/frame/contracts/fixtures/data/seal_input_twice.wat +++ /dev/null @@ -1,28 +0,0 @@ -;; Stores a value of the passed size. The host function is called twice. 
-(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; [0, 8) buffer to write input - - ;; [8, 12) size of the input buffer - (data (i32.const 8) "\04") - - (func (export "call") - ;; instructions to consume engine fuel - (drop - (i32.const 42) - ) - - (call $seal_input (i32.const 0) (i32.const 8)) - - ;; instructions to consume engine fuel - (drop - (i32.const 42) - ) - - (call $seal_input (i32.const 0) (i32.const 8)) - ) - - (func (export "deploy")) -) diff --git a/substrate/frame/contracts/fixtures/src/lib.rs b/substrate/frame/contracts/fixtures/src/lib.rs index e0d9d4f8bd5b1512f7998d497adacf695c5092e1..56a8e2321c5c3cc2181cbfeb676700993e3f0a77 100644 --- a/substrate/frame/contracts/fixtures/src/lib.rs +++ b/substrate/frame/contracts/fixtures/src/lib.rs @@ -16,34 +16,7 @@ // limitations under the License. use sp_runtime::traits::Hash; -use std::{env::var, fs, path::PathBuf}; - -fn wat_root_dir() -> PathBuf { - match (var("CARGO_MANIFEST_DIR"), var("CARGO_PKG_NAME")) { - // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder - (Err(_), _) => "substrate/frame/contracts/fixtures/data".into(), - (Ok(path), Ok(s)) if s == "pallet-contracts" => PathBuf::from(path).join("fixtures/data"), - (Ok(path), Ok(s)) if s == "pallet-contracts-mock-network" => - PathBuf::from(path).parent().unwrap().join("fixtures/data"), - (Ok(_), pkg_name) => panic!("Failed to resolve fixture dir for tests from {pkg_name:?}."), - } -} - -/// Load a given wasm module represented by a .wat file and returns a wasm binary contents along -/// with it's hash. -/// -/// The fixture files are located under the `fixtures/` directory. -fn legacy_compile_module( - fixture_name: &str, -) -> anyhow::Result<(Vec, ::Output)> -where - T: frame_system::Config, -{ - let fixture_path = wat_root_dir().join(format!("{fixture_name}.wat")); - let wasm_binary = wat::parse_file(fixture_path)?; - let code_hash = T::Hashing::hash(&wasm_binary); - Ok((wasm_binary, code_hash)) -} +use std::{fs, path::PathBuf}; /// Load a given wasm module and returns a wasm binary contents along with it's hash. /// Use the legacy compile_module as fallback, if the rust fixture does not exist yet. 
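With the legacy `.wat` fallback removed in the next hunk, `compile_module` reduces to reading the fixture that the build script already compiled into `OUT_DIR`. A minimal usage sketch from a pallet test; the `Test` mock runtime and the fixture name are assumed:

```rust
// Load a pre-built fixture binary and its code hash inside a test.
let (binary, code_hash) = compile_module::<Test>("recurse").unwrap();
assert!(!binary.is_empty());
```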
@@ -53,15 +26,11 @@ pub fn compile_module( where T: frame_system::Config, { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: PathBuf = env!("OUT_DIR").into(); let fixture_path = out_dir.join(format!("{fixture_name}.wasm")); - match fs::read(fixture_path) { - Ok(wasm_binary) => { - let code_hash = T::Hashing::hash(&wasm_binary); - Ok((wasm_binary, code_hash)) - }, - Err(_) => legacy_compile_module::(fixture_name), - } + let binary = fs::read(fixture_path)?; + let code_hash = T::Hashing::hash(&binary); + Ok((binary, code_hash)) } #[cfg(test)] diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs index dadba394e26453366d0b90ce8ff18931ab00743f..3c06131dd6088a2b044b68cfbf3b4a918eb37e3e 100644 --- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -25,7 +25,7 @@ use frame_support::{ traits::{ConstBool, ConstU32, Contains, Randomness}, weights::Weight, }; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_system::{pallet_prelude::BlockNumberFor, EnsureSigned}; use pallet_xcm::BalanceOf; use sp_runtime::{traits::Convert, Perbill}; @@ -90,9 +90,12 @@ impl pallet_contracts::Config for Runtime { type Schedule = Schedule; type Time = super::Timestamp; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; type WeightInfo = (); type WeightPrice = Self; type Debug = (); type Environment = (); + type ApiVersion = (); type Xcm = pallet_xcm::Pallet; } diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index 7e4a6c58666896e0f21936311d8b0edc614f1c79..4080cd0442dbc516231dc983a3b6609a211523db 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -19,5 +19,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["full"] } +quote = { workspace = true } +syn = { features = ["full"], workspace = true } diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index 403db15ac2cbcf796280a29ba4c7a91e3047b21e..1794d09d5ad28e08bf9579bed4f311f469be832c 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -497,9 +497,14 @@ fn expand_docs(def: &EnvDef) -> TokenStream2 { fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { let impls = expand_impls(def); let docs = docs.then_some(expand_docs(def)).unwrap_or(TokenStream2::new()); + let stable_api_count = def.host_funcs.iter().filter(|f| f.is_stable).count(); quote! { pub struct Env; + + #[cfg(test)] + pub const STABLE_API_COUNT: usize = #stable_api_count; + #impls /// Documentation of the API (host functions) available to contracts. /// @@ -638,37 +643,34 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) }; let sync_gas_before = if expand_blocks { quote! { - // Gas left in the gas meter right before switching to engine execution. - let __gas_before__ = { - let engine_consumed_total = + // Write gas from wasmi into pallet-contracts before entering the host function. 
+ let __gas_left_before__ = { + let executor_total = __caller__.fuel_consumed().expect("Fuel metering is enabled; qed"); - let gas_meter = __caller__.data_mut().ext().gas_meter_mut(); - gas_meter - .charge_fuel(engine_consumed_total) + __caller__ + .data_mut() + .ext() + .gas_meter_mut() + .sync_from_executor(executor_total) .map_err(TrapReason::from) .map_err(#into_host)? - .ref_time() }; } } else { quote! { } }; - // Gas left in the gas meter right after returning from engine execution. + // Write gas from pallet-contracts into wasmi after leaving the host function. let sync_gas_after = if expand_blocks { quote! { - let mut gas_after = __caller__.data_mut().ext().gas_meter().gas_left().ref_time(); - let mut host_consumed = __gas_before__.saturating_sub(gas_after); - // Possible undercharge of at max 1 fuel here, if host consumed less than `instruction_weights.base` - // Not a problem though, as soon as host accounts its spent gas properly. - let fuel_consumed = host_consumed - .checked_div(__caller__.data_mut().ext().schedule().instruction_weights.base as u64) - .ok_or(Error::::InvalidSchedule) - .map_err(TrapReason::from) - .map_err(#into_host)?; + let fuel_consumed = __caller__ + .data_mut() + .ext() + .gas_meter_mut() + .sync_to_executor(__gas_left_before__) + .map_err(TrapReason::from)?; __caller__ - .consume_fuel(fuel_consumed) - .map_err(|_| TrapReason::from(Error::::OutOfGas)) - .map_err(#into_host)?; + .consume_fuel(fuel_consumed.into()) + .map_err(|_| TrapReason::from(Error::::OutOfGas))?; } } else { quote! { } diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index 644c2abf0a8d5a49443f26e38b8869c134a7805f..96ce11f8c4183a53d7e8e75fc0f6d826e861c54b 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -26,7 +26,7 @@ use crate::Config; use frame_support::traits::Get; -use sp_runtime::traits::Hash; +use sp_runtime::{traits::Hash, Saturating}; use sp_std::{borrow::ToOwned, prelude::*}; use wasm_instrument::parity_wasm::{ builder, @@ -262,22 +262,25 @@ impl WasmModule { /// `instantiate_with_code` for different sizes of wasm modules. The generated module maximizes /// instrumentation runtime by nesting blocks as deeply as possible given the byte budget. /// `code_location`: Whether to place the code into `deploy` or `call`. - pub fn sized(target_bytes: u32, code_location: Location) -> Self { + pub fn sized(target_bytes: u32, code_location: Location, use_float: bool) -> Self { use self::elements::Instruction::{End, GetLocal, If, Return}; // Base size of a contract is 63 bytes and each expansion adds 6 bytes. // We do one expansion less to account for the code section and function body // size fields inside the binary wasm module representation which are leb128 encoded // and therefore grow in size when the contract grows. We are not allowed to overshoot // because of the maximum code size that is enforced by `instantiate_with_code`. 
- let expansions = (target_bytes.saturating_sub(63) / 6).saturating_sub(1); + let mut expansions = (target_bytes.saturating_sub(63) / 6).saturating_sub(1); const EXPANSION: [Instruction; 4] = [GetLocal(0), If(BlockType::NoResult), Return, End]; + let mut locals = vec![Local::new(1, ValueType::I32)]; + if use_float { + locals.push(Local::new(1, ValueType::F32)); + locals.push(Local::new(2, ValueType::F32)); + locals.push(Local::new(3, ValueType::F32)); + expansions.saturating_dec(); + } let mut module = ModuleDefinition { memory: Some(ImportedMemory::max::()), ..Default::default() }; - let body = Some(body::repeated_with_locals( - &[Local::new(1, ValueType::I32)], - expansions, - &EXPANSION, - )); + let body = Some(body::repeated_with_locals(&locals, expansions, &EXPANSION)); match code_location { Location::Call => module.call_body = body, Location::Deploy => module.deploy_body = body, diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index d6c24daa7c4da6222bad784f3140ac1188df3900..794777ef05cfae248542c61a811875925deaed45 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -362,7 +362,7 @@ benchmarks! { call_with_code_per_byte { let c in 0 .. T::MaxCodeLen::get(); let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::sized(c, Location::Deploy), vec![], + whitelisted_caller(), WasmModule::sized(c, Location::Deploy, false), vec![], )?; let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); @@ -389,7 +389,7 @@ benchmarks! { let value = Pallet::::min_balance(); let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); - let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); + let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call, false); let origin = RawOrigin::Signed(caller.clone()); let addr = Contracts::::contract_address(&caller, &hash, &input, &salt); }: _(origin, value, Weight::MAX, None, code, input, salt) @@ -468,19 +468,36 @@ benchmarks! { // It creates a maximum number of metering blocks per byte. // `c`: Size of the code in bytes. #[pov_mode = Measured] - upload_code { + upload_code_determinism_enforced { let c in 0 .. T::MaxCodeLen::get(); let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); - let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); + let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call, false); let origin = RawOrigin::Signed(caller.clone()); - }: _(origin, code, None, Determinism::Enforced) + }: upload_code(origin, code, None, Determinism::Enforced) verify { // uploading the code reserves some balance in the callers account assert!(T::Currency::total_balance_on_hold(&caller) > 0u32.into()); assert!(>::code_exists(&hash)); } + // Uploading code with [`Determinism::Relaxed`] should be more expensive than uploading code with [`Determinism::Enforced`], + // as we always try to save the code with [`Determinism::Enforced`] first. + #[pov_mode = Measured] + upload_code_determinism_relaxed { + let c in 0 .. T::MaxCodeLen::get(); + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + let WasmModule { code, hash, .. 
} = WasmModule::::sized(c, Location::Call, true); + let origin = RawOrigin::Signed(caller.clone()); + }: upload_code(origin, code, None, Determinism::Relaxed) + verify { + assert!(T::Currency::total_balance_on_hold(&caller) > 0u32.into()); + assert!(>::code_exists(&hash)); + // Ensure that the benchmark follows the most expensive path, i.e., the code is saved with [`Determinism::Relaxed`] after trying to save it with [`Determinism::Enforced`]. + assert_eq!(CodeInfoOf::::get(&hash).unwrap().determinism(), Determinism::Relaxed); + } + // Removing code does not depend on the size of the contract because all the information // needed to verify the removal claim (refcount, owner) is stored in a separate storage // item (`CodeInfoOf`). @@ -895,7 +912,7 @@ benchmarks! { }, ImportedFunction { module: "seal0", - name: "add_delegate_dependency", + name: "lock_delegate_dependency", params: vec![ValueType::I32], return_type: None, } @@ -910,7 +927,7 @@ benchmarks! { value: code_hashes_bytes, }, ], - deploy_body: Some(body::repeated_dyn(r, vec![ + deploy_body: Some(body::repeated_dyn(T::MaxDelegateDependencies::get(), vec![ Counter(beneficiary_len as u32, code_hash_len as u32), // code_hash_ptr Regular(Instruction::Call(1)), ])), @@ -926,6 +943,7 @@ benchmarks! { assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); assert_eq!(T::Currency::balance(&instance.account_id), Pallet::::min_balance() * 2u32.into()); assert_ne!(T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &instance.account_id), 0u32.into()); + assert_eq!(ContractInfoOf::::get(&instance.account_id).unwrap().delegate_dependencies_count() as u32, T::MaxDelegateDependencies::get()); }: call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]) verify { if r > 0 { @@ -2402,7 +2420,7 @@ benchmarks! { }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] - add_delegate_dependency { + lock_delegate_dependency { let r in 0 .. T::MaxDelegateDependencies::get(); let code_hashes = (0..r) .map(|i| { @@ -2420,7 +2438,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "seal0", - name: "add_delegate_dependency", + name: "lock_delegate_dependency", params: vec![ValueType::I32], return_type: None, }], @@ -2440,7 +2458,7 @@ benchmarks! { let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - remove_delegate_dependency { + unlock_delegate_dependency { let r in 0 .. T::MaxDelegateDependencies::get(); let code_hashes = (0..r) .map(|i| { @@ -2459,12 +2477,12 @@ benchmarks! 
{ memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "seal0", - name: "remove_delegate_dependency", + name: "unlock_delegate_dependency", params: vec![ValueType::I32], return_type: None, }, ImportedFunction { module: "seal0", - name: "add_delegate_dependency", + name: "lock_delegate_dependency", params: vec![ValueType::I32], return_type: None }], diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 79220021efe4870eb3c37d42c8bacb2026548b21..7da321af547d5b8f5539673f7540c328299945b6 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -348,20 +348,20 @@ pub trait Ext: sealing::Sealed { /// - [`Error::::MaxDelegateDependenciesReached`] /// - [`Error::::CannotAddSelfAsDelegateDependency`] /// - [`Error::::DelegateDependencyAlreadyExists`] - fn add_delegate_dependency( + fn lock_delegate_dependency( &mut self, code_hash: CodeHash, ) -> Result<(), DispatchError>; /// Removes a delegate dependency from [`ContractInfo`]'s `delegate_dependencies` field. /// - /// This is the counterpart of [`Self::add_delegate_dependency`]. It decreases the reference - /// count and refunds the deposit that was charged by [`Self::add_delegate_dependency`]. + /// This is the counterpart of [`Self::lock_delegate_dependency`]. It decreases the reference + /// count and refunds the deposit that was charged by [`Self::lock_delegate_dependency`]. /// /// # Errors /// /// - [`Error::::DelegateDependencyNotFound`] - fn remove_delegate_dependency( + fn unlock_delegate_dependency( &mut self, code_hash: &CodeHash, ) -> Result<(), DispatchError>; @@ -1554,7 +1554,7 @@ where }); } - fn add_delegate_dependency( + fn lock_delegate_dependency( &mut self, code_hash: CodeHash, ) -> Result<(), DispatchError> { @@ -1565,7 +1565,7 @@ where let code_info = CodeInfoOf::::get(code_hash).ok_or(Error::::CodeNotFound)?; let deposit = T::CodeHashLockupDepositPercent::get().mul_ceil(code_info.deposit()); - info.add_delegate_dependency(code_hash, deposit)?; + info.lock_delegate_dependency(code_hash, deposit)?; Self::increment_refcount(code_hash)?; frame .nested_storage @@ -1573,14 +1573,14 @@ where Ok(()) } - fn remove_delegate_dependency( + fn unlock_delegate_dependency( &mut self, code_hash: &CodeHash, ) -> Result<(), DispatchError> { let frame = self.top_frame_mut(); let info = frame.contract_info.get(&frame.account_id); - let deposit = info.remove_delegate_dependency(code_hash)?; + let deposit = info.unlock_delegate_dependency(code_hash)?; Self::decrement_refcount(*code_hash); frame .nested_storage diff --git a/substrate/frame/contracts/src/gas.rs b/substrate/frame/contracts/src/gas.rs index 9271b615d00263c809e17dd68761790ea2fa9e7c..b9d91f38f16f957ae6bc40ef08010ecf146dbf03 100644 --- a/substrate/frame/contracts/src/gas.rs +++ b/substrate/frame/contracts/src/gas.rs @@ -23,7 +23,7 @@ use frame_support::{ DefaultNoBound, }; use sp_core::Get; -use sp_runtime::{traits::Zero, DispatchError}; +use sp_runtime::{traits::Zero, DispatchError, Saturating}; #[cfg(test)] use std::{any::Any, fmt::Debug}; @@ -37,6 +37,24 @@ impl ChargedAmount { } } +/// Used to capture the gas left before entering a host function. +/// +/// Has to be consumed in order to sync back the gas after leaving the host function. +#[must_use] +pub struct RefTimeLeft(u64); + +/// Resource that needs to be synced to the executor. +/// +/// Wrapped to make sure that the resource will be synced back to the executor.
+#[must_use] +pub struct Syncable(u64); + +impl From for u64 { + fn from(from: Syncable) -> u64 { + from.0 + } +} + #[cfg(not(test))] pub trait TestAuxiliaries {} #[cfg(not(test))] @@ -84,8 +102,13 @@ pub struct GasMeter { gas_left: Weight, /// Due to `adjust_gas` and `nested` the `gas_left` can temporarily dip below its final value. gas_left_lowest: Weight, - /// Amount of fuel consumed by the engine from the last host function call. - engine_consumed: u64, + /// The amount of resources that was consumed by the execution engine. + /// + /// This should be equivalent to `self.gas_consumed().ref_time()` but expressed in whatever + /// unit the execution engine uses to track resource consumption. We have to track it + /// separately in order to avoid the loss of precision that happens when converting from + /// ref_time to the execution engine unit. + executor_consumed: u64, _phantom: PhantomData, #[cfg(test)] tokens: Vec, @@ -97,7 +120,7 @@ impl GasMeter { gas_limit, gas_left: gas_limit, gas_left_lowest: gas_limit, - engine_consumed: Default::default(), + executor_consumed: 0, _phantom: PhantomData, #[cfg(test)] tokens: Vec::new(), @@ -172,32 +195,41 @@ impl GasMeter { self.gas_left = self.gas_left.saturating_add(adjustment).min(self.gas_limit); } - /// This method is used for gas syncs with the engine. + /// Hand over the gas metering responsibility from the executor to this meter. /// - /// Updates internal `engine_comsumed` tracker of engine fuel consumption. + /// Needs to be called when entering a host function to update this meter with the + /// gas that was tracked by the executor. It tracks the latest seen total value + /// in order to compute the delta that needs to be charged. + pub fn sync_from_executor( + &mut self, + executor_total: u64, + ) -> Result { + let chargable_reftime = executor_total + .saturating_sub(self.executor_consumed) + .saturating_mul(u64::from(T::Schedule::get().instruction_weights.base)); + self.executor_consumed = executor_total; + self.gas_left + .checked_reduce(Weight::from_parts(chargable_reftime, 0)) + .ok_or_else(|| Error::::OutOfGas)?; + Ok(RefTimeLeft(self.gas_left.ref_time())) + } + + /// Hand over the gas metering responsibility from this meter to the executor. /// - /// Charges self with the `ref_time` Weight corresponding to wasmi fuel consumed on the engine - /// side since last sync. Passed value is scaled by multiplying it by the weight of a basic - /// operation, as such an operation in wasmi engine costs 1. + /// Needs to be called when leaving a host function in order to calculate how much + /// gas needs to be charged from the **executor**. It updates the last seen executor + /// total value so that it is correct when `sync_from_executor` is called the next time. /// - /// Returns the updated `gas_left` `Weight` value from the meter. - /// Normally this would never fail, as engine should fail first when out of gas. - pub fn charge_fuel(&mut self, wasmi_fuel_total: u64) -> Result { - // Take the part consumed since the last update. 
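`sync_from_executor` (above) and `sync_to_executor` (below) replace the single `charge_fuel` whose removal follows. A worked numeric example of one round trip through a host function; the base instruction weight of 5 ref_time per unit of engine fuel is an assumed value, and this is only a sketch of the arithmetic, not the meter itself:

```rust
fn main() {
	let base: u64 = 5; // instruction_weights.base (assumed): ref_time per unit of fuel.

	// Entering the host function: the executor reports 100 fuel consumed in
	// total, of which 40 were already seen on an earlier sync, so
	// sync_from_executor charges (100 - 40) * 5 = 300 ref_time to the meter.
	let (executor_total, already_seen) = (100u64, 40u64);
	let charged = executor_total.saturating_sub(already_seen).saturating_mul(base);
	assert_eq!(charged, 300);

	// Leaving the host function: it burned 45 ref_time of its own, so
	// sync_to_executor reports 45 / 5 = 9 fuel for the caller to feed into
	// `consume_fuel`, keeping wasmi's metering in lockstep with the meter.
	let (before, after) = (10_000u64, 10_000u64 - 45);
	let fuel = before.saturating_sub(after) / base;
	assert_eq!(fuel, 9);
}
```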
- let wasmi_fuel = wasmi_fuel_total.saturating_sub(self.engine_consumed); - if !wasmi_fuel.is_zero() { - self.engine_consumed = wasmi_fuel_total; - let reftime_consumed = - wasmi_fuel.saturating_mul(T::Schedule::get().instruction_weights.base as u64); - let ref_time_left = self - .gas_left - .ref_time() - .checked_sub(reftime_consumed) - .ok_or_else(|| Error::::OutOfGas)?; - - *(self.gas_left.ref_time_mut()) = ref_time_left; - } - Ok(self.gas_left) + /// It is important that this does **not** actually sync with the executor. That has + /// to be done by the caller. + pub fn sync_to_executor(&mut self, before: RefTimeLeft) -> Result { + let chargable_executor_resource = before + .0 + .saturating_sub(self.gas_left().ref_time()) + .checked_div(u64::from(T::Schedule::get().instruction_weights.base)) + .ok_or(Error::::InvalidSchedule)?; + self.executor_consumed.saturating_accrue(chargable_executor_resource); + Ok(Syncable(chargable_executor_resource)) } /// Returns the amount of gas that is required to run the same call. diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 533085f2b874f6cc1d6c122fa6db5c8498c3db0c..de84d5220c548a29cf046c85b8fea0520a5d3e91 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -214,6 +214,27 @@ pub struct Environment { block_number: EnvironmentType>, } +/// Defines the current version of the HostFn APIs. +/// This is used to communicate the available APIs in pallet-contracts. +/// +/// The version is bumped any time a new HostFn is added or stabilized. +#[derive(Encode, Decode, TypeInfo)] +pub struct ApiVersion(u16); +impl Default for ApiVersion { + fn default() -> Self { + Self(2) + } +} + +#[test] +fn api_version_is_up_to_date() { + assert_eq!( + 109, + crate::wasm::STABLE_API_COUNT, + "Stable API count has changed. Bump the returned value of ApiVersion::default() and update the test." + ); +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -222,7 +243,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::Perbill; - /// The current storage version. + /// The in-code storage version. pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); #[pallet::pallet] @@ -325,7 +346,7 @@ pub mod pallet { type DepositPerItem: Get>; /// The percentage of the storage deposit that should be held for using a code hash. - /// Instantiating a contract, or calling [`chain_extension::Ext::add_delegate_dependency`] + /// Instantiating a contract, or calling [`chain_extension::Ext::lock_delegate_dependency`] /// protects the code from being removed. In order to prevent abuse these actions are /// protected with a percentage of the code deposit. #[pallet::constant] @@ -347,7 +368,7 @@ pub mod pallet { type MaxStorageKeyLen: Get; /// The maximum number of delegate_dependencies that a contract can lock with - /// [`chain_extension::Ext::add_delegate_dependency`]. + /// [`chain_extension::Ext::lock_delegate_dependency`]. #[pallet::constant] type MaxDelegateDependencies: Get; @@ -367,6 +388,24 @@ pub mod pallet { #[pallet::constant] type MaxDebugBufferLen: Get; + /// Origin allowed to upload code. + /// + /// By default, it is safe to set this to `EnsureSigned`, allowing anyone to upload contract + /// code. + type UploadOrigin: EnsureOrigin; + + /// Origin allowed to instantiate code. + /// + /// # Note + /// + /// This is not enforced when a contract instantiates another contract. 
The + /// [`Self::UploadOrigin`] should make sure that no code is deployed that does unwanted + /// instantiations. + /// + /// By default, it is safe to set this to `EnsureSigned`, allowing anyone to instantiate + /// contract code. + type InstantiateOrigin: EnsureOrigin; + /// Overarching hold reason. type RuntimeHoldReason: From; @@ -402,6 +441,12 @@ pub mod pallet { #[pallet::constant] type Environment: Get>; + /// The version of the HostFn APIs that are available in the runtime. + /// + /// Only valid value is `()`. + #[pallet::constant] + type ApiVersion: Get; + /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and /// execute XCM programs. type Xcm: xcm_builder::Controller< @@ -609,8 +654,17 @@ pub mod pallet { /// To avoid this situation a constructor could employ access control so that it can /// only be instantiated by permissioned entities. The same is true when uploading /// through [`Self::instantiate_with_code`]. + /// + /// Use [`Determinism::Relaxed`] exclusively for non-deterministic code. If the uploaded + /// code is deterministic, specifying [`Determinism::Relaxed`] will be disregarded and + /// result in higher gas costs. #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::upload_code(code.len() as u32))] + #[pallet::weight( + match determinism { + Determinism::Enforced => T::WeightInfo::upload_code_determinism_enforced(code.len() as u32), + Determinism::Relaxed => T::WeightInfo::upload_code_determinism_relaxed(code.len() as u32), + } + )] pub fn upload_code( origin: OriginFor, code: Vec, @@ -618,7 +672,7 @@ pub mod pallet { determinism: Determinism, ) -> DispatchResult { Migration::::ensure_migrated()?; - let origin = ensure_signed(origin)?; + let origin = T::UploadOrigin::ensure_origin(origin)?; Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into), determinism) .map(|_| ()) } @@ -767,11 +821,17 @@ pub mod pallet { salt: Vec, ) -> DispatchResultWithPostInfo { Migration::::ensure_migrated()?; - let origin = ensure_signed(origin)?; + + // These two origins will usually be the same; however, we treat them as separate since + // it is possible for the `Success` value of `UploadOrigin` and `InstantiateOrigin` to + // differ. 
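Because `UploadOrigin` and `InstantiateOrigin` are checked independently, a runtime can gate the two actions differently, for example restricting uploads to a whitelisted account while leaving instantiation open to any signed origin. A hedged sketch of such a filter, modeled on `EnsureSigned`; the `EnsureUploader` name and the whitelisted key are hypothetical:

```rust
use core::marker::PhantomData;
use frame_support::traits::EnsureOrigin;
use frame_system::RawOrigin;

/// Hypothetical origin filter: only one fixed uploader key may upload code.
pub struct EnsureUploader<T>(PhantomData<T>);

impl<T: frame_system::Config> EnsureOrigin<T::RuntimeOrigin> for EnsureUploader<T>
where
	T::AccountId: From<[u8; 32]>,
{
	type Success = T::AccountId;

	fn try_origin(o: T::RuntimeOrigin) -> Result<Self::Success, T::RuntimeOrigin> {
		let uploader: T::AccountId = [42u8; 32].into(); // Assumed whitelisted key.
		o.into().and_then(|raw| match raw {
			RawOrigin::Signed(who) if who == uploader => Ok(who),
			r => Err(T::RuntimeOrigin::from(r)),
		})
	}

	#[cfg(feature = "runtime-benchmarks")]
	fn try_successful_origin() -> Result<T::RuntimeOrigin, ()> {
		Ok(RawOrigin::Signed([42u8; 32].into()).into())
	}
}
```

With such a filter, `type UploadOrigin = EnsureUploader<Runtime>` tightens uploads while `type InstantiateOrigin = EnsureSigned<Self::AccountId>` (as in the mock runtime above) keeps instantiation open.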
+ let upload_origin = T::UploadOrigin::ensure_origin(origin.clone())?; + let instantiate_origin = T::InstantiateOrigin::ensure_origin(origin)?; + let code_len = code.len() as u32; let (module, upload_deposit) = Self::try_upload_code( - origin.clone(), + upload_origin, code, storage_deposit_limit.clone().map(Into::into), Determinism::Enforced, @@ -785,7 +845,7 @@ pub mod pallet { let data_len = data.len() as u32; let salt_len = salt.len() as u32; let common = CommonInput { - origin: Origin::from_account_id(origin), + origin: Origin::from_account_id(instantiate_origin), value, data, gas_limit, @@ -826,10 +886,11 @@ pub mod pallet { salt: Vec, ) -> DispatchResultWithPostInfo { Migration::::ensure_migrated()?; + let origin = T::InstantiateOrigin::ensure_origin(origin)?; let data_len = data.len() as u32; let salt_len = salt.len() as u32; let common = CommonInput { - origin: Origin::from_runtime_origin(origin)?, + origin: Origin::from_account_id(origin), value, data, gas_limit, diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 6d61cb6b1e1af158b9d4942efabc73267f8e44f7..f30ae1ebfadee55eeca000e74b4aac76f93f7551 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -263,10 +263,10 @@ impl Migration { impl OnRuntimeUpgrade for Migration { fn on_runtime_upgrade() -> Weight { let name = >::name(); - let current_version = >::current_storage_version(); + let in_code_version = >::in_code_storage_version(); let on_chain_version = >::on_chain_storage_version(); - if on_chain_version == current_version { + if on_chain_version == in_code_version { log::warn!( target: LOG_TARGET, "{name}: No Migration performed storage_version = latest_version = {:?}", @@ -289,7 +289,7 @@ impl OnRuntimeUpgrade for Migration OnRuntimeUpgrade for Migration>::on_chain_storage_version(); - let current_version = >::current_storage_version(); + let in_code_version = >::in_code_storage_version(); - if on_chain_version == current_version { + if on_chain_version == in_code_version { return Ok(Default::default()) } log::debug!( target: LOG_TARGET, - "Requested migration of {} from {:?}(on-chain storage version) to {:?}(current storage version)", - >::name(), on_chain_version, current_version + "Requested migration of {} from {:?}(on-chain storage version) to {:?}(in-code storage version)", + >::name(), on_chain_version, in_code_version ); ensure!( - T::Migrations::is_upgrade_supported(on_chain_version, current_version), - "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)" + T::Migrations::is_upgrade_supported(on_chain_version, in_code_version), + "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, in-code storage version)" ); Ok(Default::default()) @@ -421,7 +421,7 @@ impl Migration { }, StepResult::Completed { steps_done } => { in_progress_version.put::>(); - if >::current_storage_version() != in_progress_version { + if >::in_code_storage_version() != in_progress_version { log::info!( target: LOG_TARGET, "{name}: Next migration is {:?},", diff --git a/substrate/frame/contracts/src/migration/v09.rs b/substrate/frame/contracts/src/migration/v09.rs index 98fcccc2c0becedccd4bd15eed98b36fd52662ee..f19bff9d6747f8359558e2c2d5a289fad668bfaa 100644 --- a/substrate/frame/contracts/src/migration/v09.rs +++ b/substrate/frame/contracts/src/migration/v09.rs @@ -28,7 +28,7 @@ use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound, Identity}; use 
sp_runtime::TryRuntimeError; use sp_std::prelude::*; -mod old { +mod v8 { use super::*; #[derive(Encode, Decode)] @@ -50,14 +50,14 @@ mod old { #[cfg(feature = "runtime-benchmarks")] pub fn store_old_dummy_code(len: usize) { use sp_runtime::traits::Hash; - let module = old::PrefabWasmModule { + let module = v8::PrefabWasmModule { instruction_weights_version: 0, initial: 0, maximum: 0, code: vec![42u8; len], }; let hash = T::Hashing::hash(&module.code); - old::CodeStorage::::insert(hash, module); + v8::CodeStorage::::insert(hash, module); } #[derive(Encode, Decode)] @@ -89,9 +89,9 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_key) = self.last_code_hash.take() { - old::CodeStorage::::iter_from(old::CodeStorage::::hashed_key_for(last_key)) + v8::CodeStorage::::iter_from(v8::CodeStorage::::hashed_key_for(last_key)) } else { - old::CodeStorage::::iter() + v8::CodeStorage::::iter() }; if let Some((key, old)) = iter.next() { @@ -115,7 +115,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::CodeStorage::::iter().take(100).collect(); + let sample: Vec<_> = v8::CodeStorage::::iter().take(100).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contract codes", sample.len()); Ok(sample.encode()) @@ -123,7 +123,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { - let sample = , old::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) + let sample = , v8::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) .expect("pre_upgrade_step provides a valid state; qed"); log::debug!(target: LOG_TARGET, "Validating sample of {} contract codes", sample.len()); diff --git a/substrate/frame/contracts/src/migration/v10.rs b/substrate/frame/contracts/src/migration/v10.rs index d64673aac7d268df80adb955ff3d5c5fd16c5681..1bee86e6a773a9b0d7720438644a4ae84d9bd332 100644 --- a/substrate/frame/contracts/src/migration/v10.rs +++ b/substrate/frame/contracts/src/migration/v10.rs @@ -47,7 +47,7 @@ use sp_runtime::{ }; use sp_std::prelude::*; -mod old { +mod v9 { use super::*; pub type BalanceOf = ( ) where OldCurrency: ReservableCurrency<::AccountId> + 'static, { - let info = old::ContractInfo { + let info = v9::ContractInfo { trie_id: info.trie_id, code_hash: info.code_hash, storage_bytes: Default::default(), @@ -94,7 +94,7 @@ pub fn store_old_contract_info( storage_item_deposit: Default::default(), storage_base_deposit: Default::default(), }; - old::ContractInfoOf::::insert(account, info); + v9::ContractInfoOf::::insert(account, info); } #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen)] @@ -120,9 +120,9 @@ where pub code_hash: CodeHash, storage_bytes: u32, storage_items: u32, - pub storage_byte_deposit: old::BalanceOf, - storage_item_deposit: old::BalanceOf, - storage_base_deposit: old::BalanceOf, + pub storage_byte_deposit: v9::BalanceOf, + storage_item_deposit: v9::BalanceOf, + storage_base_deposit: v9::BalanceOf, } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -152,7 +152,7 @@ fn deposit_address( impl MigrationStep for Migration where OldCurrency: ReservableCurrency<::AccountId> - + Inspect<::AccountId, Balance = old::BalanceOf>, + + Inspect<::AccountId, Balance = v9::BalanceOf>, { const VERSION: u16 = 10; @@ -162,11 +162,11 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if 
let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from( - old::ContractInfoOf::::hashed_key_for(last_account), + v9::ContractInfoOf::::iter_from( + v9::ContractInfoOf::::hashed_key_for(last_account), ) } else { - old::ContractInfoOf::::iter() + v9::ContractInfoOf::::iter() }; if let Some((account, contract)) = iter.next() { @@ -276,7 +276,7 @@ where #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::ContractInfoOf::::iter().take(10).collect(); + let sample: Vec<_> = v9::ContractInfoOf::::iter().take(10).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); Ok(sample.encode()) @@ -284,7 +284,7 @@ where #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { - let sample = )> as Decode>::decode( + let sample = )> as Decode>::decode( &mut &state[..], ) .expect("pre_upgrade_step provides a valid state; qed"); diff --git a/substrate/frame/contracts/src/migration/v11.rs b/substrate/frame/contracts/src/migration/v11.rs index a5b11f6e08977fbbeb737bc2650ad318ddac97c8..9bfbb25edfb44a8072d2d0073ec4cdb4d27cc360 100644 --- a/substrate/frame/contracts/src/migration/v11.rs +++ b/substrate/frame/contracts/src/migration/v11.rs @@ -29,7 +29,7 @@ use sp_runtime::TryRuntimeError; use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_std::{marker::PhantomData, prelude::*}; -mod old { +mod v10 { use super::*; #[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -51,11 +51,11 @@ pub struct DeletionQueueManager { #[cfg(any(feature = "runtime-benchmarks", feature = "try-runtime"))] pub fn fill_old_queue(len: usize) { - let queue: Vec = - core::iter::repeat_with(|| old::DeletedContract { trie_id: Default::default() }) + let queue: Vec = + core::iter::repeat_with(|| v10::DeletedContract { trie_id: Default::default() }) .take(len) .collect(); - old::DeletionQueue::::set(Some(queue)); + v10::DeletionQueue::::set(Some(queue)); } #[storage_alias] @@ -80,7 +80,7 @@ impl MigrationStep for Migration { } fn step(&mut self) -> (IsFinished, Weight) { - let Some(old_queue) = old::DeletionQueue::::take() else { + let Some(old_queue) = v10::DeletionQueue::::take() else { return (IsFinished::Yes, Weight::zero()) }; let len = old_queue.len(); @@ -106,7 +106,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let old_queue = old::DeletionQueue::::take().unwrap_or_default(); + let old_queue = v10::DeletionQueue::::take().unwrap_or_default(); if old_queue.is_empty() { let len = 10u32; diff --git a/substrate/frame/contracts/src/migration/v12.rs b/substrate/frame/contracts/src/migration/v12.rs index 7dee31503101ba753b0e807d11621be711d4b58b..d9128286df389f84b451660e8d00f4a886b629d4 100644 --- a/substrate/frame/contracts/src/migration/v12.rs +++ b/substrate/frame/contracts/src/migration/v12.rs @@ -34,7 +34,7 @@ use sp_runtime::TryRuntimeError; use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128, Saturating}; use sp_std::prelude::*; -mod old { +mod v11 { use super::*; pub type BalanceOf = , #[codec(compact)] - deposit: old::BalanceOf, + deposit: v11::BalanceOf, #[codec(compact)] refcount: u64, determinism: Determinism, @@ -112,17 +112,17 @@ where let hash = T::Hashing::hash(&code); PristineCode::::insert(hash, code.clone()); - let module = old::PrefabWasmModule { + let module = v11::PrefabWasmModule { instruction_weights_version: 
Default::default(), initial: Default::default(), maximum: Default::default(), code, determinism: Determinism::Enforced, }; - old::CodeStorage::::insert(hash, module); + v11::CodeStorage::::insert(hash, module); - let info = old::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; - old::OwnerInfoOf::::insert(hash, info); + let info = v11::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; + v11::OwnerInfoOf::::insert(hash, info); } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -148,16 +148,16 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_key) = self.last_code_hash.take() { - old::OwnerInfoOf::::iter_from( - old::OwnerInfoOf::::hashed_key_for(last_key), + v11::OwnerInfoOf::::iter_from( + v11::OwnerInfoOf::::hashed_key_for(last_key), ) } else { - old::OwnerInfoOf::::iter() + v11::OwnerInfoOf::::iter() }; if let Some((hash, old_info)) = iter.next() { log::debug!(target: LOG_TARGET, "Migrating OwnerInfo for code_hash {:?}", hash); - let module = old::CodeStorage::::take(hash) + let module = v11::CodeStorage::::take(hash) .expect(format!("No PrefabWasmModule found for code_hash: {:?}", hash).as_str()); let code_len = module.code.len(); @@ -184,7 +184,7 @@ where let bytes_before = module .encoded_size() .saturating_add(code_len) - .saturating_add(old::OwnerInfo::::max_encoded_len()) + .saturating_add(v11::OwnerInfo::::max_encoded_len()) as u32; let items_before = 3u32; let deposit_expected_before = price_per_byte @@ -241,10 +241,10 @@ where fn pre_upgrade_step() -> Result, TryRuntimeError> { let len = 100; log::debug!(target: LOG_TARGET, "Taking sample of {} OwnerInfo(s)", len); - let sample: Vec<_> = old::OwnerInfoOf::::iter() + let sample: Vec<_> = v11::OwnerInfoOf::::iter() .take(len) .map(|(k, v)| { - let module = old::CodeStorage::::get(k) + let module = v11::CodeStorage::::get(k) .expect("No PrefabWasmModule found for code_hash: {:?}"); let info: CodeInfo = CodeInfo { determinism: module.determinism, @@ -258,9 +258,9 @@ where .collect(); let storage: u32 = - old::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); - let mut deposit: old::BalanceOf = Default::default(); - old::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); + v11::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); + let mut deposit: v11::BalanceOf = Default::default(); + v11::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); Ok((sample, deposit, storage).encode()) } @@ -269,7 +269,7 @@ where fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { let state = <( Vec<(CodeHash, CodeInfo)>, - old::BalanceOf, + v11::BalanceOf, u32, ) as Decode>::decode(&mut &state[..]) .unwrap(); @@ -283,7 +283,7 @@ where ensure!(info.refcount == old.refcount, "invalid refcount"); } - if let Some((k, _)) = old::CodeStorage::::iter().next() { + if let Some((k, _)) = v11::CodeStorage::::iter().next() { log::warn!( target: LOG_TARGET, "CodeStorage is still NOT empty, found code_hash: {:?}", @@ -292,7 +292,7 @@ where } else { log::debug!(target: LOG_TARGET, "CodeStorage is empty."); } - if let Some((k, _)) = old::OwnerInfoOf::::iter().next() { + if let Some((k, _)) = v11::OwnerInfoOf::::iter().next() { log::warn!( target: LOG_TARGET, "OwnerInfoOf is still NOT empty, found code_hash: {:?}", @@ -302,7 +302,7 @@ where log::debug!(target: LOG_TARGET, "OwnerInfoOf is empty."); } - let mut deposit: old::BalanceOf = Default::default(); + let mut deposit: v11::BalanceOf = 
Default::default(); let mut items = 0u32; let mut storage_info = 0u32; CodeInfoOf::::iter().for_each(|(_k, v)| { diff --git a/substrate/frame/contracts/src/migration/v13.rs b/substrate/frame/contracts/src/migration/v13.rs index dd2eb12eb62a5daf92cec8b5910c03f6d81ae0c0..498c44d53abce9730e9833fcbd27e1b2dbb2fe38 100644 --- a/substrate/frame/contracts/src/migration/v13.rs +++ b/substrate/frame/contracts/src/migration/v13.rs @@ -28,7 +28,7 @@ use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_runtime::BoundedBTreeMap; use sp_std::prelude::*; -mod old { +mod v12 { use super::*; #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -59,7 +59,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co let entropy = (b"contract_depo_v1", account.clone()).using_encoded(T::Hashing::hash); let deposit_account = Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed"); - let info = old::ContractInfo { + let info = v12::ContractInfo { trie_id: info.trie_id.clone(), deposit_account, code_hash: info.code_hash, @@ -69,7 +69,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co storage_item_deposit: Default::default(), storage_base_deposit: Default::default(), }; - old::ContractInfoOf::::insert(account, info); + v12::ContractInfoOf::::insert(account, info); } #[storage_alias] @@ -104,11 +104,11 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + v12::ContractInfoOf::::iter_from(v12::ContractInfoOf::::hashed_key_for( last_account, )) } else { - old::ContractInfoOf::::iter() + v12::ContractInfoOf::::iter() }; if let Some((key, old)) = iter.next() { diff --git a/substrate/frame/contracts/src/migration/v14.rs b/substrate/frame/contracts/src/migration/v14.rs index 94534d05fdf889d05227149efd57ede584ecb013..09da09e5bcfb1f9ab4396a651e67519a75e09df2 100644 --- a/substrate/frame/contracts/src/migration/v14.rs +++ b/substrate/frame/contracts/src/migration/v14.rs @@ -44,7 +44,7 @@ use sp_runtime::{traits::Zero, Saturating}; #[cfg(feature = "try-runtime")] use sp_std::collections::btree_map::BTreeMap; -mod old { +mod v13 { use super::*; pub type BalanceOf = , #[codec(compact)] - pub deposit: old::BalanceOf, + pub deposit: v13::BalanceOf, #[codec(compact)] pub refcount: u64, pub determinism: Determinism, @@ -86,14 +86,14 @@ where let code = vec![42u8; len as usize]; let hash = T::Hashing::hash(&code); - let info = old::CodeInfo { + let info = v13::CodeInfo { owner: account, deposit: 10_000u32.into(), refcount: u64::MAX, determinism: Determinism::Enforced, code_len: len, }; - old::CodeInfoOf::::insert(hash, info); + v13::CodeInfoOf::::insert(hash, info); } #[cfg(feature = "try-runtime")] @@ -105,9 +105,9 @@ where OldCurrency: ReservableCurrency<::AccountId>, { /// Total reserved balance as code upload deposit for the owner. - reserved: old::BalanceOf, + reserved: v13::BalanceOf, /// Total balance of the owner. 
- total: old::BalanceOf, + total: v13::BalanceOf, } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -134,11 +134,11 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_hash) = self.last_code_hash.take() { - old::CodeInfoOf::::iter_from( - old::CodeInfoOf::::hashed_key_for(last_hash), + v13::CodeInfoOf::::iter_from( + v13::CodeInfoOf::::hashed_key_for(last_hash), ) } else { - old::CodeInfoOf::::iter() + v13::CodeInfoOf::::iter() }; if let Some((hash, code_info)) = iter.next() { @@ -194,7 +194,7 @@ where #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let info: Vec<_> = old::CodeInfoOf::::iter().collect(); + let info: Vec<_> = v13::CodeInfoOf::::iter().collect(); let mut owner_balance_allocation = BTreeMap::, BalanceAllocation>::new(); diff --git a/substrate/frame/contracts/src/migration/v15.rs b/substrate/frame/contracts/src/migration/v15.rs index 180fe855ca66728ba18b17555363eeac11a340ba..c77198d6fea0f6b7c305d40b981330cbb50e355d 100644 --- a/substrate/frame/contracts/src/migration/v15.rs +++ b/substrate/frame/contracts/src/migration/v15.rs @@ -46,7 +46,7 @@ use sp_runtime::{traits::Zero, Saturating}; #[cfg(feature = "try-runtime")] use sp_std::vec::Vec; -mod old { +mod v14 { use super::*; #[derive( @@ -81,7 +81,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co let entropy = (b"contract_depo_v1", account.clone()).using_encoded(T::Hashing::hash); let deposit_account = Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed"); - let info = old::ContractInfo { + let info = v14::ContractInfo { trie_id: info.trie_id.clone(), deposit_account, code_hash: info.code_hash, @@ -92,7 +92,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co storage_base_deposit: info.storage_base_deposit(), delegate_dependencies: info.delegate_dependencies().clone(), }; - old::ContractInfoOf::::insert(account, info); + v14::ContractInfoOf::::insert(account, info); } #[derive(Encode, Decode, CloneNoBound, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -127,11 +127,11 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + v14::ContractInfoOf::::iter_from(v14::ContractInfoOf::::hashed_key_for( last_account, )) } else { - old::ContractInfoOf::::iter() + v14::ContractInfoOf::::iter() }; if let Some((account, old_contract)) = iter.next() { @@ -243,11 +243,11 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::ContractInfoOf::::iter().take(100).collect(); + let sample: Vec<_> = v14::ContractInfoOf::::iter().take(100).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); - let state: Vec<(T::AccountId, old::ContractInfo, BalanceOf, BalanceOf)> = sample + let state: Vec<(T::AccountId, v14::ContractInfo, BalanceOf, BalanceOf)> = sample .iter() .map(|(account, contract)| { ( @@ -265,7 +265,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { let sample = - , BalanceOf, BalanceOf)> as Decode>::decode( + , BalanceOf, BalanceOf)> as Decode>::decode( &mut &state[..], ) .expect("pre_upgrade_step provides a valid state; qed"); diff --git 
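
The `pre_upgrade_step`/`post_upgrade_step` pairs in these migrations share one pattern: snapshot a bounded sample of pre-migration state into opaque bytes, hand the bytes across the upgrade, then decode and check that every sampled entry survived. A toy round-trip of that pattern, assuming a hand-rolled (count, key, value) little-endian layout in place of SCALE encoding:

fn pre_upgrade(state: &[(u32, u64)]) -> Vec<u8> {
    let mut out = (state.len() as u32).to_le_bytes().to_vec();
    for (key, deposit) in state {
        out.extend_from_slice(&key.to_le_bytes());
        out.extend_from_slice(&deposit.to_le_bytes());
    }
    out
}

fn post_upgrade(bytes: &[u8], migrated: &[(u32, u64)]) -> Result<(), &'static str> {
    let (len_bytes, mut rest) = bytes.split_at(4);
    let len = u32::from_le_bytes(len_bytes.try_into().unwrap());
    for _ in 0..len {
        let (k, r) = rest.split_at(4);
        let (d, r) = r.split_at(8);
        rest = r;
        let key = u32::from_le_bytes(k.try_into().unwrap());
        let deposit = u64::from_le_bytes(d.try_into().unwrap());
        // Like the `ensure!(info.deposit == old.deposit)` checks above:
        // every sampled entry must survive the migration unchanged.
        if !migrated.contains(&(key, deposit)) {
            return Err("sampled entry lost or altered by the migration");
        }
    }
    Ok(())
}

fn main() {
    let before = [(1u32, 100u64), (2, 250)];
    let blob = pre_upgrade(&before);
    let after = [(2u32, 250u64), (1, 100)]; // order may change, content must not
    assert!(post_upgrade(&blob, &after).is_ok());
}
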
a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs index 54e037d6be94419443e8aba95ebf044b2708e88b..b2e3801deaec1a36aef589427a6bf9dd324183b8 100644 --- a/substrate/frame/contracts/src/schedule.rs +++ b/substrate/frame/contracts/src/schedule.rs @@ -298,11 +298,11 @@ pub struct HostFnWeights { /// Weight of calling `instantiation_nonce`. pub instantiation_nonce: Weight, - /// Weight of calling `add_delegate_dependency`. - pub add_delegate_dependency: Weight, + /// Weight of calling `lock_delegate_dependency`. + pub lock_delegate_dependency: Weight, - /// Weight of calling `remove_delegate_dependency`. - pub remove_delegate_dependency: Weight, + /// Weight of calling `unlock_delegate_dependency`. + pub unlock_delegate_dependency: Weight, /// The type parameter is used in the default implementation. #[codec(skip)] @@ -435,8 +435,8 @@ impl Default for HostFnWeights { reentrance_count: cost!(seal_reentrance_count), account_reentrance_count: cost!(seal_account_reentrance_count), instantiation_nonce: cost!(seal_instantiation_nonce), - add_delegate_dependency: cost!(add_delegate_dependency), - remove_delegate_dependency: cost!(remove_delegate_dependency), + lock_delegate_dependency: cost!(lock_delegate_dependency), + unlock_delegate_dependency: cost!(unlock_delegate_dependency), _phantom: PhantomData, } } diff --git a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index d4d261f4ec1ffcfc68c5245864b00ea2f47928ed..e99afd5d42ee4684adc874d68bdba325feb58358 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -66,7 +66,7 @@ pub struct ContractInfo { storage_base_deposit: BalanceOf, /// Map of code hashes and deposit balances. /// - /// Tracks the code hash and deposit held for adding delegate dependencies. Dependencies added + /// Tracks the code hash and deposit held for locking delegate dependencies. Dependencies added /// to the map can not be removed from the chain state and can be safely used for delegate /// calls. delegate_dependencies: BoundedBTreeMap, BalanceOf, T::MaxDelegateDependencies>, @@ -108,6 +108,11 @@ impl ContractInfo { Ok(contract) } + /// Returns the number of locked delegate dependencies. + pub fn delegate_dependencies_count(&self) -> usize { + self.delegate_dependencies.len() + } + /// Associated child trie unique id is built from the hash part of the trie id. pub fn child_trie_info(&self) -> ChildInfo { ChildInfo::new_default(self.trie_id.as_ref()) @@ -233,7 +238,7 @@ impl ContractInfo { /// /// Returns an error if the maximum number of delegate_dependencies is reached or if /// the delegate dependency already exists. - pub fn add_delegate_dependency( + pub fn lock_delegate_dependency( &mut self, code_hash: CodeHash, amount: BalanceOf, @@ -249,7 +254,7 @@ impl ContractInfo { /// dependency. /// /// Returns an error if the entry doesn't exist. - pub fn remove_delegate_dependency( + pub fn unlock_delegate_dependency( &mut self, code_hash: &CodeHash, ) -> Result, DispatchError> { @@ -322,7 +327,8 @@ impl ContractInfo { KillStorageResult::SomeRemaining(_) => return weight_limit, KillStorageResult::AllRemoved(keys_removed) => { entry.remove(); - remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed); + // charge at least one key even if none were removed. 
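
The renamed `lock_delegate_dependency`/`unlock_delegate_dependency` pair above maintains a bounded map from code hash to the deposit held for it. A stand-alone model of those semantics, assuming a plain `BTreeMap` with a hand-checked bound instead of `BoundedBTreeMap`, and folding the self-dependency check (which really lives in the call machinery) in for illustration:

use std::collections::BTreeMap;

const MAX_DELEGATE_DEPENDENCIES: usize = 2;

struct ContractInfo {
    own_hash: u32,
    delegate_dependencies: BTreeMap<u32, u128>, // code hash -> held deposit
}

impl ContractInfo {
    fn lock_delegate_dependency(&mut self, hash: u32, deposit: u128) -> Result<(), &'static str> {
        if hash == self.own_hash {
            return Err("CannotAddSelfAsDelegateDependency");
        }
        if self.delegate_dependencies.contains_key(&hash) {
            return Err("DelegateDependencyAlreadyExists");
        }
        if self.delegate_dependencies.len() >= MAX_DELEGATE_DEPENDENCIES {
            return Err("MaxDelegateDependenciesReached");
        }
        self.delegate_dependencies.insert(hash, deposit);
        Ok(())
    }

    /// Releases the entry and hands back the deposit that was held for it.
    fn unlock_delegate_dependency(&mut self, hash: u32) -> Result<u128, &'static str> {
        self.delegate_dependencies
            .remove(&hash)
            .ok_or("DelegateDependencyNotFound")
    }
}

fn main() {
    let mut info = ContractInfo { own_hash: 7, delegate_dependencies: BTreeMap::new() };
    assert_eq!(info.lock_delegate_dependency(1, 30), Ok(()));
    assert_eq!(info.lock_delegate_dependency(1, 30), Err("DelegateDependencyAlreadyExists"));
    assert_eq!(info.lock_delegate_dependency(7, 30), Err("CannotAddSelfAsDelegateDependency"));
    assert_eq!(info.lock_delegate_dependency(2, 10), Ok(()));
    assert_eq!(info.lock_delegate_dependency(3, 10), Err("MaxDelegateDependenciesReached"));
    assert_eq!(info.unlock_delegate_dependency(1), Ok(30)); // deposit refunded
    assert_eq!(info.unlock_delegate_dependency(1), Err("DelegateDependencyNotFound"));
}
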
+ remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed.max(1)); }, }; } diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index a06b65b86b5a030f0d5255f64a37dcb2dc6b272a..4c1f65e28d5f73e04db8e1ffd301e12f9fe0c8f9 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -35,8 +35,8 @@ use crate::{ tests::test_utils::{get_contract, get_contract_checked}, wasm::{Determinism, ReturnErrorCode as RuntimeReturnCode}, weights::WeightInfo, - BalanceOf, Code, CodeHash, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, - DebugInfo, DefaultAddressGenerator, DeletionQueueCounter, Error, HoldReason, + Array, BalanceOf, Code, CodeHash, CodeInfoOf, CollectEvents, Config, ContractInfo, + ContractInfoOf, DebugInfo, DefaultAddressGenerator, DeletionQueueCounter, Error, HoldReason, MigrationInProgress, Origin, Pallet, PristineCode, Schedule, }; use assert_matches::assert_matches; @@ -45,6 +45,7 @@ use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_err_with_weight, assert_noop, assert_ok, derive_impl, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, + pallet_prelude::EnsureOrigin, parameter_types, storage::child, traits::{ @@ -434,6 +435,33 @@ impl Contains for TestFilter { } } +parameter_types! { + pub static UploadAccount: Option<::AccountId> = None; + pub static InstantiateAccount: Option<::AccountId> = None; +} + +pub struct EnsureAccount(sp_std::marker::PhantomData<(T, A)>); +impl>>> + EnsureOrigin<::RuntimeOrigin> for EnsureAccount +where + ::AccountId: From, +{ + type Success = T::AccountId; + + fn try_origin(o: T::RuntimeOrigin) -> Result { + let who = as EnsureOrigin<_>>::try_origin(o.clone())?; + if matches!(A::get(), Some(a) if who != a) { + return Err(o) + } + + Ok(who) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Err(()) + } +} parameter_types! { pub static UnstableInterface: bool = true; } @@ -458,6 +486,8 @@ impl Config for Test { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = UnstableInterface; + type UploadOrigin = EnsureAccount; + type InstantiateOrigin = EnsureAccount; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type RuntimeHoldReason = RuntimeHoldReason; type Migrations = crate::migration::codegen::BenchMigrations; @@ -465,6 +495,7 @@ impl Config for Test { type MaxDelegateDependencies = MaxDelegateDependencies; type Debug = TestDebug; type Environment = (); + type ApiVersion = (); type Xcm = (); } @@ -821,27 +852,6 @@ fn deposit_event_max_value_limit() { }); } -// Fail out of fuel (ref_time weight) inside the start function. -#[test] -fn run_out_of_fuel_start_fun() { - let (wasm, _code_hash) = compile_module::("run_out_of_gas_start_fn").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - Weight::from_parts(1_000_000_000_000, u64::MAX), - None, - wasm, - vec![], - vec![], - ), - Error::::OutOfGas, - ); - }); -} - // Fail out of fuel (ref_time weight) in the engine. 
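
The `.max(1)` added above guarantees that every visited deletion-queue entry consumes at least one unit of key budget, even when its child trie turns out to be empty. A toy loop showing why subtracting a reported `0` verbatim lets iterations run without paying for themselves (made-up numbers, not the pallet's weight model):

fn drain(queue: &[u32], mut key_budget: u32, charge_floor: bool) -> u32 {
    let mut iterations = 0u32;
    let mut i = 0usize;
    while key_budget > 0 && !queue.is_empty() {
        let removed = queue[i % queue.len()]; // keys this entry reports removing
        i += 1;
        iterations += 1;
        if iterations > 1_000 {
            break; // safety valve for the buggy variant only
        }
        let charged = if charge_floor { removed.max(1) } else { removed };
        key_budget = key_budget.saturating_sub(charged);
    }
    iterations
}

fn main() {
    let queue = [0u32, 0, 0]; // already-empty child tries report 0 removals
    // Buggy accounting: nothing is charged, so only the safety valve stops it.
    assert!(drain(&queue, 10, false) > 1_000);
    // Fixed accounting: at least one key is charged per visit, so the loop
    // terminates after at most `budget` visits.
    assert_eq!(drain(&queue, 10, true), 10);
}
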
#[test] fn run_out_of_fuel_engine() { @@ -925,50 +935,15 @@ fn run_out_of_fuel_host() { #[test] fn gas_syncs_work() { - let (wasm0, _code_hash) = compile_module::("seal_input_noop").unwrap(); - let (wasm1, _code_hash) = compile_module::("seal_input_once").unwrap(); - let (wasm2, _code_hash) = compile_module::("seal_input_twice").unwrap(); + let (code, _code_hash) = compile_module::("caller_is_origin_n").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Instantiate noop contract. - let addr0 = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm0), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - - // Instantiate 1st contract. - let addr1 = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm1), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - - // Instantiate 2nd contract. - let addr2 = Contracts::bare_instantiate( + let addr = Contracts::bare_instantiate( ALICE, 0, GAS_LIMIT, None, - Code::Upload(wasm2), + Code::Upload(code), vec![], vec![], DebugInfo::Skip, @@ -980,11 +955,11 @@ fn gas_syncs_work() { let result = Contracts::bare_call( ALICE, - addr0, + addr.clone(), 0, GAS_LIMIT, None, - 1u8.to_le_bytes().to_vec(), + 0u32.encode(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, @@ -994,27 +969,28 @@ fn gas_syncs_work() { let result = Contracts::bare_call( ALICE, - addr1, + addr.clone(), 0, GAS_LIMIT, None, - 1u8.to_le_bytes().to_vec(), + 1u32.encode(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, ); assert_ok!(result.result); let gas_consumed_once = result.gas_consumed.ref_time(); - let host_consumed_once = ::Schedule::get().host_fn_weights.input.ref_time(); + let host_consumed_once = + ::Schedule::get().host_fn_weights.caller_is_origin.ref_time(); let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop; let result = Contracts::bare_call( ALICE, - addr2, + addr, 0, GAS_LIMIT, None, - 1u8.to_le_bytes().to_vec(), + 2u32.encode(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, @@ -4422,96 +4398,6 @@ fn contract_reverted() { }); } -#[test] -fn code_rejected_error_works() { - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - let (wasm, _) = compile_module::("invalid_module").unwrap(); - assert_noop!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Enforced - ), - >::CodeRejected, - ); - let result = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - ); - assert_err!(result.result, >::CodeRejected); - assert_eq!( - std::str::from_utf8(&result.debug_message).unwrap(), - "Can't load the module into wasmi!" 
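
The reworked `gas_syncs_work` below infers per-call costs by differencing: with a fixture that performs `n` identical host calls, total gas must be affine in `n`, so two measurements pin down the line and every further sample has to land on it. The shape of that assertion over made-up sample data:

fn assert_linear(samples: &[(u64, u64)]) {
    // samples: (number_of_calls, gas_consumed)
    let (n0, g0) = samples[0];
    let (n1, g1) = samples[1];
    let marginal = (g1 - g0) / (n1 - n0); // cost of one extra call
    for &(n, g) in samples {
        assert_eq!(g, g0 + marginal * (n - n0), "gas is not linear at n={n}");
    }
}

fn main() {
    // e.g. a baseline of 1_000 gas plus 75 gas per `caller_is_origin` call
    // (illustrative figures, not benchmark output).
    let samples = [(0u64, 1_000u64), (1, 1_075), (2, 1_150), (32, 3_400)];
    assert_linear(&samples);
}
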
- ); - - let (wasm, _) = compile_module::("invalid_contract_no_call").unwrap(); - assert_noop!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Enforced - ), - >::CodeRejected, - ); - - let result = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - ); - assert_err!(result.result, >::CodeRejected); - assert_eq!( - std::str::from_utf8(&result.debug_message).unwrap(), - "call function isn't exported" - ); - - let (wasm, _) = compile_module::("invalid_contract_no_memory").unwrap(); - assert_noop!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Enforced - ), - >::CodeRejected, - ); - - let result = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - ); - assert_err!(result.result, >::CodeRejected); - assert_eq!( - std::str::from_utf8(&result.debug_message).unwrap(), - "No memory import found in the module" - ); - }); -} - #[test] fn set_code_hash() { let (wasm, code_hash) = compile_module::("set_code_hash").unwrap(); @@ -5154,6 +5040,26 @@ fn deposit_limit_honors_min_leftover() { }); } +#[test] +fn upload_should_enforce_deterministic_mode_when_possible() { + let upload = |fixture, determinism| { + let (wasm, code_hash) = compile_module::(fixture).unwrap(); + ExtBuilder::default() + .build() + .execute_with(|| -> Result { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + Contracts::bare_upload_code(ALICE, wasm, None, determinism)?; + let info = CodeInfoOf::::get(code_hash).unwrap(); + Ok(info.determinism()) + }) + }; + + assert_eq!(upload("dummy", Determinism::Enforced), Ok(Determinism::Enforced)); + assert_eq!(upload("dummy", Determinism::Relaxed), Ok(Determinism::Enforced)); + assert_eq!(upload("float_instruction", Determinism::Relaxed), Ok(Determinism::Relaxed)); + assert!(upload("float_instruction", Determinism::Enforced).is_err()); +} + #[test] fn cannot_instantiate_indeterministic_code() { let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); @@ -5406,21 +5312,21 @@ fn delegate_call_indeterministic_code() { } #[test] -fn add_remove_delegate_dependency_works() { +fn locking_delegate_dependency_works() { // set hash lock up deposit to 30%, to test deposit calculation. CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); MAX_DELEGATE_DEPENDENCIES.with(|c| *c.borrow_mut() = 1); let (wasm_caller, self_code_hash) = - compile_module::("add_remove_delegate_dependency").unwrap(); + compile_module::("locking_delegate_dependency").unwrap(); let (wasm_callee, code_hash) = compile_module::("dummy").unwrap(); let (wasm_other, other_code_hash) = compile_module::("call").unwrap(); - // Define inputs with various actions to test adding / removing delegate_dependencies. + // Define inputs with various actions to test locking / unlocking delegate_dependencies. // See the contract for more details. let noop_input = (0u32, code_hash); - let add_delegate_dependency_input = (1u32, code_hash); - let remove_delegate_dependency_input = (2u32, code_hash); + let lock_delegate_dependency_input = (1u32, code_hash); + let unlock_delegate_dependency_input = (2u32, code_hash); let terminate_input = (3u32, code_hash); // Instantiate the caller contract with the given input. 
@@ -5457,9 +5363,9 @@ fn add_remove_delegate_dependency_works() { ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { let _ = Balances::set_balance(&ALICE, 1_000_000); - // Instantiate with add_delegate_dependency should fail since the code is not yet on chain. + // Instantiate with lock_delegate_dependency should fail since the code is not yet on chain. assert_err!( - instantiate(&add_delegate_dependency_input).result, + instantiate(&lock_delegate_dependency_input).result, Error::::CodeNotFound ); @@ -5469,7 +5375,7 @@ fn add_remove_delegate_dependency_works() { .unwrap(); // Instantiate should now work. - let addr_caller = instantiate(&add_delegate_dependency_input).result.unwrap().account_id; + let addr_caller = instantiate(&lock_delegate_dependency_input).result.unwrap().account_id; // There should be a dependency and a deposit. let contract = test_utils::get_contract(&addr_caller); @@ -5490,27 +5396,27 @@ fn add_remove_delegate_dependency_works() { >::CodeInUse ); - // Adding an already existing dependency should fail. + // Locking an already existing dependency should fail. assert_err!( - call(&addr_caller, &add_delegate_dependency_input).result, + call(&addr_caller, &lock_delegate_dependency_input).result, Error::::DelegateDependencyAlreadyExists ); - // Adding a dependency to self should fail. + // Locking self should fail. assert_err!( call(&addr_caller, &(1u32, self_code_hash)).result, Error::::CannotAddSelfAsDelegateDependency ); - // Adding more than the maximum allowed delegate_dependencies should fail. + // Locking more than the maximum allowed delegate_dependencies should fail. Contracts::bare_upload_code(ALICE, wasm_other, None, Determinism::Enforced).unwrap(); assert_err!( call(&addr_caller, &(1u32, other_code_hash)).result, Error::::MaxDelegateDependenciesReached ); - // Removing dependency should work. - assert_ok!(call(&addr_caller, &remove_delegate_dependency_input).result); + // Unlocking dependency should work. + assert_ok!(call(&addr_caller, &unlock_delegate_dependency_input).result); // Dependency should be removed, and deposit should be returned. let contract = test_utils::get_contract(&addr_caller); @@ -5525,18 +5431,18 @@ fn add_remove_delegate_dependency_works() { // Removing an unexisting dependency should fail. assert_err!( - call(&addr_caller, &remove_delegate_dependency_input).result, + call(&addr_caller, &unlock_delegate_dependency_input).result, Error::::DelegateDependencyNotFound ); - // Adding a dependency with a storage limit too low should fail. + // Locking a dependency with a storage limit too low should fail. DEFAULT_DEPOSIT_LIMIT.with(|c| *c.borrow_mut() = dependency_deposit - 1); assert_err!( - call(&addr_caller, &add_delegate_dependency_input).result, + call(&addr_caller, &lock_delegate_dependency_input).result, Error::::StorageDepositLimitExhausted ); - // Since we removed the dependency we should now be able to remove the code. + // Since we unlocked the dependency we should now be able to remove the code. assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); // Calling should fail since the delegated contract is not on chain anymore. @@ -5545,7 +5451,7 @@ fn add_remove_delegate_dependency_works() { // Restore initial deposit limit and add the dependency back. 
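
The sequence this test walks through hinges on reference counting: a locked dependency holds a reference on the callee's code, `remove_code` refuses with `CodeInUse` while any reference exists, and unlocking makes the code removable again. A toy counter model of that guard, not the pallet's actual `CodeInfo` bookkeeping:

use std::collections::BTreeMap;

struct CodeStore {
    refcounts: BTreeMap<u32, u64>, // code hash -> locks held on it
}

impl CodeStore {
    fn lock(&mut self, hash: u32) -> Result<(), &'static str> {
        *self.refcounts.get_mut(&hash).ok_or("CodeNotFound")? += 1;
        Ok(())
    }
    fn unlock(&mut self, hash: u32) {
        if let Some(c) = self.refcounts.get_mut(&hash) {
            *c = c.saturating_sub(1);
        }
    }
    fn remove_code(&mut self, hash: u32) -> Result<(), &'static str> {
        match self.refcounts.get(&hash) {
            Some(0) => {
                self.refcounts.remove(&hash);
                Ok(())
            },
            Some(_) => Err("CodeInUse"),
            None => Err("CodeNotFound"),
        }
    }
}

fn main() {
    let mut store = CodeStore { refcounts: BTreeMap::from([(42u32, 0u64)]) };
    store.lock(42).unwrap();
    assert_eq!(store.remove_code(42), Err("CodeInUse")); // still locked
    store.unlock(42);
    assert_eq!(store.remove_code(42), Ok(())); // unlocked, so removable
    assert_eq!(store.lock(42), Err("CodeNotFound")); // gone from the chain
}
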
DEFAULT_DEPOSIT_LIMIT.with(|c| *c.borrow_mut() = 10_000_000); Contracts::bare_upload_code(ALICE, wasm_callee, None, Determinism::Enforced).unwrap(); - call(&addr_caller, &add_delegate_dependency_input).result.unwrap(); + call(&addr_caller, &lock_delegate_dependency_input).result.unwrap(); // Call terminate should work, and return the deposit. let balance_before = test_utils::get_balance(&ALICE); @@ -5923,7 +5829,106 @@ fn root_cannot_instantiate() { vec![], vec![], ), - DispatchError::RootNotAllowed + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn only_upload_origin_can_upload() { + let (wasm, _) = compile_module::("dummy").unwrap(); + UploadAccount::set(Some(ALICE)); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&BOB, 1_000_000); + + assert_err!( + Contracts::upload_code( + RuntimeOrigin::root(), + wasm.clone(), + None, + Determinism::Enforced, + ), + DispatchError::BadOrigin + ); + + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(BOB), + wasm.clone(), + None, + Determinism::Enforced, + ), + DispatchError::BadOrigin + ); + + // Only alice is allowed to upload contract code. + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + None, + Determinism::Enforced, + )); + }); +} + +#[test] +fn only_instantiation_origin_can_instantiate() { + let (code, code_hash) = compile_module::("dummy").unwrap(); + InstantiateAccount::set(Some(ALICE)); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&BOB, 1_000_000); + + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::root(), + 0, + GAS_LIMIT, + None, + code.clone(), + vec![], + vec![], + ), + DispatchError::BadOrigin + ); + + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::signed(BOB), + 0, + GAS_LIMIT, + None, + code.clone(), + vec![], + vec![], + ), + DispatchError::BadOrigin + ); + + // Only Alice can instantiate + assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + code, + vec![], + vec![], + ),); + + // Bob cannot instantiate with either `instantiate_with_code` or `instantiate`. 
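
Both new tests below exercise the `EnsureAccount` filter wired in as `UploadOrigin`/`InstantiateOrigin` earlier in this file: root is rejected outright, and when an allow-list account is configured only that signer passes. The same decision as a free function over toy types (u64 account ids and a two-variant origin enum, not the FRAME `EnsureOrigin` machinery):

#[derive(Debug, PartialEq)]
enum Origin {
    Root,
    Signed(u64),
}

/// Accept only a signed origin, and only the allow-listed account if one is set.
fn ensure_account(origin: Origin, allowed: Option<u64>) -> Result<u64, &'static str> {
    match origin {
        Origin::Signed(who) => match allowed {
            Some(a) if who != a => Err("BadOrigin"),
            _ => Ok(who),
        },
        Origin::Root => Err("BadOrigin"), // root may *not* upload/instantiate
    }
}

fn main() {
    const ALICE: u64 = 1;
    const BOB: u64 = 2;
    let allowed = Some(ALICE);
    assert_eq!(ensure_account(Origin::Root, allowed), Err("BadOrigin"));
    assert_eq!(ensure_account(Origin::Signed(BOB), allowed), Err("BadOrigin"));
    assert_eq!(ensure_account(Origin::Signed(ALICE), allowed), Ok(ALICE));
    // With no allow-list configured, any signed origin passes.
    assert_eq!(ensure_account(Origin::Signed(BOB), None), Ok(BOB));
}
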
+ assert_err_ignore_postinfo!( + Contracts::instantiate( + RuntimeOrigin::signed(BOB), + 0, + GAS_LIMIT, + None, + code_hash, + vec![], + vec![], + ), + DispatchError::BadOrigin ); }); } @@ -5977,3 +5982,53 @@ fn balance_api_returns_free_balance() { ); }); } + +#[test] +fn gas_consumed_is_linear_for_nested_calls() { + let (code, _code_hash) = compile_module::("recurse").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let addr = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(code), + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id; + + let max_call_depth = ::CallStack::size() as u32; + let [gas_0, gas_1, gas_2, gas_max] = { + [0u32, 1u32, 2u32, max_call_depth] + .iter() + .map(|i| { + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + i.encode(), + DebugInfo::Skip, + CollectEvents::Skip, + Determinism::Enforced, + ); + assert_ok!(result.result); + result.gas_consumed + }) + .collect::>() + .try_into() + .unwrap() + }; + + let gas_per_recursion = gas_2.checked_sub(&gas_1).unwrap(); + assert_eq!(gas_max, gas_0 + gas_per_recursion * max_call_depth as u64); + }); +} diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 1c7395ec3bfffb929792eb23197bfd8ea73a32b5..af287a6f2dff946fa5d3e688ff8a71d9db4c22a3 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -21,6 +21,9 @@ mod prepare; mod runtime; +#[cfg(test)] +pub use runtime::STABLE_API_COUNT; + #[cfg(doc)] pub use crate::wasm::runtime::api_doc; @@ -313,6 +316,11 @@ impl CodeInfo { } } + /// Returns the determinism of the module. + pub fn determinism(&self) -> Determinism { + self.determinism + } + /// Returns reference count of the module. pub fn refcount(&self) -> u64 { self.refcount @@ -386,7 +394,7 @@ impl Executable for WasmBlob { let engine_consumed_total = store.fuel_consumed().expect("Fuel metering is enabled; qed"); let gas_meter = store.data_mut().ext().gas_meter_mut(); - gas_meter.charge_fuel(engine_consumed_total)?; + let _ = gas_meter.sync_from_executor(engine_consumed_total)?; store.into_data().to_execution_result(result) }; @@ -733,14 +741,14 @@ mod tests { Ok(()) } fn decrement_refcount(_code_hash: CodeHash) {} - fn add_delegate_dependency( + fn lock_delegate_dependency( &mut self, code: CodeHash, ) -> Result<(), DispatchError> { self.delegate_dependencies.borrow_mut().insert(code); Ok(()) } - fn remove_delegate_dependency( + fn unlock_delegate_dependency( &mut self, code: &CodeHash, ) -> Result<(), DispatchError> { @@ -1821,17 +1829,6 @@ mod tests { assert!(weight_left.all_gt(actual_left), "gas_left must be greater than final"); } - /// Test that [`frame_support::weights::OldWeight`] en/decodes the same as our - /// [`crate::OldWeight`]. 
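
The `sync_from_executor` call replacing `charge_fuel` above reconciles wasmi's fuel accounting with the pallet's gas meter. A sketch of that reconciliation, under the assumption that the engine reports cumulative fuel while the meter must charge only the delta since the last sync; the names and structure are illustrative, not the real `GasMeter` API:

struct GasMeter {
    gas_left: u64,
    engine_consumed_total: u64, // last cumulative figure seen from the engine
}

impl GasMeter {
    fn sync_from_executor(&mut self, engine_total: u64) -> Result<u64, &'static str> {
        // Charge only what the engine burned since the previous sync.
        let delta = engine_total
            .checked_sub(self.engine_consumed_total)
            .ok_or("engine fuel went backwards")?;
        self.engine_consumed_total = engine_total;
        self.gas_left = self.gas_left.checked_sub(delta).ok_or("OutOfGas")?;
        Ok(self.gas_left)
    }
}

fn main() {
    let mut meter = GasMeter { gas_left: 100, engine_consumed_total: 0 };
    assert_eq!(meter.sync_from_executor(30), Ok(70)); // first sync: 30 burned
    assert_eq!(meter.sync_from_executor(50), Ok(50)); // cumulative 50 => +20
    assert_eq!(meter.sync_from_executor(200), Err("OutOfGas"));
}
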
- #[test] - fn old_weight_decode() { - #![allow(deprecated)] - let sp = frame_support::weights::OldWeight(42).encode(); - let our = crate::OldWeight::decode(&mut &*sp).unwrap(); - - assert_eq!(our, 42); - } - const CODE_VALUE_TRANSFERRED: &str = r#" (module (import "seal0" "seal_value_transferred" (func $seal_value_transferred (param i32 i32))) @@ -3399,16 +3396,16 @@ mod tests { } #[test] - fn add_remove_delegate_dependency() { - const CODE_ADD_REMOVE_DELEGATE_DEPENDENCY: &str = r#" + fn lock_unlock_delegate_dependency() { + const CODE_LOCK_UNLOCK_DELEGATE_DEPENDENCY: &str = r#" (module - (import "seal0" "add_delegate_dependency" (func $add_delegate_dependency (param i32))) - (import "seal0" "remove_delegate_dependency" (func $remove_delegate_dependency (param i32))) + (import "seal0" "lock_delegate_dependency" (func $lock_delegate_dependency (param i32))) + (import "seal0" "unlock_delegate_dependency" (func $unlock_delegate_dependency (param i32))) (import "env" "memory" (memory 1 1)) (func (export "call") - (call $add_delegate_dependency (i32.const 0)) - (call $add_delegate_dependency (i32.const 32)) - (call $remove_delegate_dependency (i32.const 32)) + (call $lock_delegate_dependency (i32.const 0)) + (call $lock_delegate_dependency (i32.const 32)) + (call $unlock_delegate_dependency (i32.const 32)) ) (func (export "deploy")) @@ -3426,7 +3423,7 @@ mod tests { ) "#; let mut mock_ext = MockExt::default(); - assert_ok!(execute(&CODE_ADD_REMOVE_DELEGATE_DEPENDENCY, vec![], &mut mock_ext)); + assert_ok!(execute(&CODE_LOCK_UNLOCK_DELEGATE_DEPENDENCY, vec![], &mut mock_ext)); let delegate_dependencies: Vec<_> = mock_ext.delegate_dependencies.into_inner().into_iter().collect(); assert_eq!(delegate_dependencies.len(), 1); @@ -3448,4 +3445,22 @@ mod tests { runtime.read_sandbox_memory_as(&memory, 0u32).unwrap(); assert_eq!(decoded.into_inner(), data); } + + #[test] + fn run_out_of_gas_in_start_fn() { + const CODE: &str = r#" +(module + (import "env" "memory" (memory 1 1)) + (start $start) + (func $start + (loop $inf (br $inf)) ;; just run out of gas + (unreachable) + ) + (func (export "call")) + (func (export "deploy")) +) +"#; + let mut mock_ext = MockExt::default(); + assert_err!(execute(&CODE, vec![], &mut mock_ext), >::OutOfGas); + } } diff --git a/substrate/frame/contracts/src/wasm/prepare.rs b/substrate/frame/contracts/src/wasm/prepare.rs index 5cdf5600fcdc6d08b55cd68685a532494b4583a7..5efea8c3a23b319ff86fa38089ef04e1dcdc6681 100644 --- a/substrate/frame/contracts/src/wasm/prepare.rs +++ b/substrate/frame/contracts/src/wasm/prepare.rs @@ -79,7 +79,10 @@ impl LoadedModule { } let engine = Engine::new(&config); - let module = Module::new(&engine, code).map_err(|_| "Can't load the module into wasmi!")?; + let module = Module::new(&engine, code).map_err(|err| { + log::debug!(target: LOG_TARGET, "Module creation failed: {:?}", err); + "Can't load the module into wasmi!" + })?; // Return a `LoadedModule` instance with // __valid__ module. @@ -220,7 +223,7 @@ impl LoadedModule { fn validate( code: &[u8], schedule: &Schedule, - determinism: Determinism, + determinism: &mut Determinism, ) -> Result<(), (DispatchError, &'static str)> where E: Environment<()>, @@ -229,7 +232,17 @@ where (|| { // We check that the module is generally valid, // and does not have restricted WebAssembly features, here. 
- let contract_module = LoadedModule::new::(code, determinism, None)?; + let contract_module = match *determinism { + Determinism::Relaxed => + if let Ok(module) = LoadedModule::new::(code, Determinism::Enforced, None) { + *determinism = Determinism::Enforced; + module + } else { + LoadedModule::new::(code, Determinism::Relaxed, None)? + }, + Determinism::Enforced => LoadedModule::new::(code, Determinism::Enforced, None)?, + }; + // The we check that module satisfies constraints the pallet puts on contracts. contract_module.scan_exports()?; contract_module.scan_imports::(schedule)?; @@ -252,7 +265,7 @@ where &code, (), schedule, - determinism, + *determinism, stack_limits, AllowDeprecatedInterface::No, ) @@ -276,13 +289,13 @@ pub fn prepare( code: CodeVec, schedule: &Schedule, owner: AccountIdOf, - determinism: Determinism, + mut determinism: Determinism, ) -> Result, (DispatchError, &'static str)> where E: Environment<()>, T: Config, { - validate::(code.as_ref(), schedule, determinism)?; + validate::(code.as_ref(), schedule, &mut determinism)?; // Calculate deposit for storing contract code and `code_info` in two different storage items. let code_len = code.len() as u32; diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 74b19910c3719d423cfcbb54f1d0fa7bc50b2a8c..7d43d30ba580dee2f69bdbe3b51638e9d98e61cb 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -246,10 +246,10 @@ pub enum RuntimeCosts { AccountEntranceCount, /// Weight of calling `instantiation_nonce` InstantationNonce, - /// Weight of calling `add_delegate_dependency` - AddDelegateDependency, - /// Weight of calling `remove_delegate_dependency` - RemoveDelegateDependency, + /// Weight of calling `lock_delegate_dependency` + LockDelegateDependency, + /// Weight of calling `unlock_delegate_dependency` + UnlockDelegateDependency, } impl Token for RuntimeCosts { @@ -338,8 +338,8 @@ impl Token for RuntimeCosts { ReentrantCount => s.reentrance_count, AccountEntranceCount => s.account_reentrance_count, InstantationNonce => s.instantiation_nonce, - AddDelegateDependency => s.add_delegate_dependency, - RemoveDelegateDependency => s.remove_delegate_dependency, + LockDelegateDependency => s.lock_delegate_dependency, + UnlockDelegateDependency => s.unlock_delegate_dependency, } } } @@ -987,7 +987,6 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { // for every function. #[define_env(doc)] pub mod env { - /// Set the value at the given key in the contract storage. /// See [`pallet_contracts_uapi::HostFn::set_storage`] #[prefixed_alias] @@ -1216,7 +1215,6 @@ pub mod env { /// Make a call to another contract. /// See [`pallet_contracts_uapi::HostFn::call_v2`]. #[version(2)] - #[unstable] fn call( ctx: _, memory: _, @@ -1353,7 +1351,6 @@ pub mod env { /// Instantiate a contract with the specified code hash. /// See [`pallet_contracts_uapi::HostFn::instantiate_v2`]. #[version(2)] - #[unstable] fn instantiate( ctx: _, memory: _, @@ -2306,22 +2303,20 @@ pub mod env { } /// Adds a new delegate dependency to the contract. - /// See [`pallet_contracts_uapi::HostFn::add_delegate_dependency`]. - #[unstable] - fn add_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { - ctx.charge_gas(RuntimeCosts::AddDelegateDependency)?; + /// See [`pallet_contracts_uapi::HostFn::lock_delegate_dependency`]. 
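
The two-pass validation above is the behavior change behind the new `upload_code_determinism_*` benchmarks and the `upload_should_enforce_deterministic_mode_when_possible` test: code uploaded as `Relaxed` is first tried under `Enforced` rules and silently upgraded when it passes, so only genuinely float-using code is stored as relaxed. The decision table as a free function over toy types, where `loads_with` stands in for `LoadedModule::new` with the given determinism setting:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Determinism {
    Enforced,
    Relaxed,
}

fn validate(code_uses_floats: bool, requested: Determinism) -> Result<Determinism, &'static str> {
    let loads_with = |d: Determinism| -> bool {
        // Enforced validation rejects float instructions; relaxed accepts them.
        matches!(d, Determinism::Relaxed) || !code_uses_floats
    };
    match requested {
        // Try the stricter mode first and silently upgrade on success.
        Determinism::Relaxed if loads_with(Determinism::Enforced) => Ok(Determinism::Enforced),
        Determinism::Relaxed => Ok(Determinism::Relaxed),
        Determinism::Enforced if loads_with(Determinism::Enforced) => Ok(Determinism::Enforced),
        Determinism::Enforced => Err("CodeRejected"),
    }
}

fn main() {
    use Determinism::*;
    assert_eq!(validate(false, Enforced), Ok(Enforced));
    assert_eq!(validate(false, Relaxed), Ok(Enforced)); // upgraded
    assert_eq!(validate(true, Relaxed), Ok(Relaxed));
    assert_eq!(validate(true, Enforced), Err("CodeRejected"));
}
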
+ fn lock_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { + ctx.charge_gas(RuntimeCosts::LockDelegateDependency)?; let code_hash = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; - ctx.ext.add_delegate_dependency(code_hash)?; + ctx.ext.lock_delegate_dependency(code_hash)?; Ok(()) } /// Removes the delegate dependency from the contract. - /// see [`pallet_contracts_uapi::HostFn::remove_delegate_dependency`]. - #[unstable] - fn remove_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { - ctx.charge_gas(RuntimeCosts::RemoveDelegateDependency)?; + /// see [`pallet_contracts_uapi::HostFn::unlock_delegate_dependency`]. + fn unlock_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { + ctx.charge_gas(RuntimeCosts::UnlockDelegateDependency)?; let code_hash = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; - ctx.ext.remove_delegate_dependency(&code_hash)?; + ctx.ext.unlock_delegate_dependency(&code_hash)?; Ok(()) } } diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index fa9df922a7cbb9711c7b1315cf680a30fc4c5ebb..99bc4d8d3eb971cd54770af41d5b7f8cdd0890dd 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -17,14 +17,15 @@ //! Autogenerated weights for `pallet_contracts` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
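
Both host functions above follow the same discipline: charge the `RuntimeCosts` token first, and only then read sandbox memory and mutate state, so a call that fails later has already paid. A stand-alone sketch of that ordering, with an illustrative `Ctx`, flat gas numbers, and 32-byte code hashes (not the real `Runtime` type):

struct Ctx {
    gas_left: u64,
    locked: Vec<[u8; 32]>,
}

impl Ctx {
    fn charge_gas(&mut self, amount: u64) -> Result<(), &'static str> {
        self.gas_left = self.gas_left.checked_sub(amount).ok_or("OutOfGas")?;
        Ok(())
    }

    fn read_code_hash(&self, memory: &[u8], ptr: usize) -> Result<[u8; 32], &'static str> {
        memory
            .get(ptr..ptr + 32)
            .and_then(|s| s.try_into().ok())
            .ok_or("OutOfBounds")
    }

    fn lock_delegate_dependency(&mut self, memory: &[u8], ptr: usize) -> Result<(), &'static str> {
        // 1. Pay for the call up front (RuntimeCosts::LockDelegateDependency).
        self.charge_gas(5)?;
        // 2. Only then touch sandbox memory and mutate state.
        let hash = self.read_code_hash(memory, ptr)?;
        self.locked.push(hash);
        Ok(())
    }
}

fn main() {
    let memory = [0xAAu8; 64];
    let mut ctx = Ctx { gas_left: 12, locked: Vec::new() };
    assert!(ctx.lock_delegate_dependency(&memory, 0).is_ok());
    assert!(ctx.lock_delegate_dependency(&memory, 32).is_ok());
    // The third call fails the up-front charge before touching any state.
    assert_eq!(ctx.lock_delegate_dependency(&memory, 0), Err("OutOfGas"));
    assert_eq!(ctx.locked.len(), 2);
}
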
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: // target/production/substrate-node +// target/production/substrate-node // benchmark // pallet // --steps=50 @@ -35,8 +36,12 @@ // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_contracts // --chain=dev +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_contracts +// --chain=dev // --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/contracts/src/weights.rs +// --output=./substrate/frame/contracts/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -67,7 +72,8 @@ pub trait WeightInfo { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight; fn instantiate(i: u32, s: u32, ) -> Weight; fn call() -> Weight; - fn upload_code(c: u32, ) -> Weight; + fn upload_code_determinism_enforced(c: u32, ) -> Weight; + fn upload_code_determinism_relaxed(c: u32, ) -> Weight; fn remove_code() -> Weight; fn set_code() -> Weight; fn seal_caller(r: u32, ) -> Weight; @@ -124,8 +130,8 @@ pub trait WeightInfo { fn seal_ecdsa_recover(r: u32, ) -> Weight; fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight; fn seal_set_code_hash(r: u32, ) -> Weight; - fn add_delegate_dependency(r: u32, ) -> Weight; - fn remove_delegate_dependency(r: u32, ) -> Weight; + fn lock_delegate_dependency(r: u32, ) -> Weight; + fn unlock_delegate_dependency(r: u32, ) -> Weight; fn seal_reentrance_count(r: u32, ) -> Weight; fn seal_account_reentrance_count(r: u32, ) -> Weight; fn seal_instantiation_nonce(r: u32, ) -> Weight; @@ -141,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_997_000 picoseconds. - Weight::from_parts(2_130_000, 1627) + // Minimum execution time: 2_054_000 picoseconds. + Weight::from_parts(2_178_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -152,10 +158,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_276_000 picoseconds. - Weight::from_parts(1_593_881, 442) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(1_109_302, 0).saturating_mul(k.into())) + // Minimum execution time: 12_119_000 picoseconds. + Weight::from_parts(12_536_000, 442) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(1_161_885, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -169,10 +175,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_176_000 picoseconds. - Weight::from_parts(8_555_388, 6149) + // Minimum execution time: 8_146_000 picoseconds. 
+ Weight::from_parts(8_560_745, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_184, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_182, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -185,8 +191,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_270_000 picoseconds. - Weight::from_parts(16_779_000, 6450) + // Minimum execution time: 16_183_000 picoseconds. + Weight::from_parts(16_825_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -199,10 +205,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_572_000 picoseconds. - Weight::from_parts(1_950_905, 3635) - // Standard Error: 1_597 - .saturating_add(Weight::from_parts(1_123_190, 0).saturating_mul(k.into())) + // Minimum execution time: 3_645_000 picoseconds. + Weight::from_parts(604_365, 3635) + // Standard Error: 1_013 + .saturating_add(Weight::from_parts(1_100_277, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -219,13 +225,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325 + c * (1 ±0)` - // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 16_873_000 picoseconds. - Weight::from_parts(16_790_402, 6263) + // Measured: `328 + c * (1 ±0)` + // Estimated: `6266 + c * (1 ±0)` + // Minimum execution time: 19_500_000 picoseconds. + Weight::from_parts(20_074_895, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(396, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -235,8 +241,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 11_904_000 picoseconds. - Weight::from_parts(12_785_000, 6380) + // Minimum execution time: 12_530_000 picoseconds. + Weight::from_parts(13_362_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -250,8 +256,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 44_920_000 picoseconds. - Weight::from_parts(46_163_000, 6292) + // Minimum execution time: 44_275_000 picoseconds. + Weight::from_parts(45_718_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -263,8 +269,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 53_864_000 picoseconds. - Weight::from_parts(55_139_000, 6534) + // Minimum execution time: 55_095_000 picoseconds. 
+ Weight::from_parts(58_009_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -274,8 +280,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_375_000 picoseconds. - Weight::from_parts(2_487_000, 1627) + // Minimum execution time: 2_455_000 picoseconds. + Weight::from_parts(2_608_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -287,8 +293,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_580_000 picoseconds. - Weight::from_parts(11_980_000, 3631) + // Minimum execution time: 11_207_000 picoseconds. + Weight::from_parts(11_802_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -298,8 +304,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_557_000 picoseconds. - Weight::from_parts(4_807_000, 3607) + // Minimum execution time: 4_581_000 picoseconds. + Weight::from_parts(4_781_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -310,8 +316,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_253_000 picoseconds. - Weight::from_parts(6_479_000, 3632) + // Minimum execution time: 5_887_000 picoseconds. + Weight::from_parts(6_393_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -322,8 +328,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_166_000 picoseconds. - Weight::from_parts(6_545_000, 3607) + // Minimum execution time: 6_025_000 picoseconds. + Weight::from_parts(6_298_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -344,13 +350,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `801 + c * (1 ±0)` - // Estimated: `6739 + c * (1 ±0)` - // Minimum execution time: 282_232_000 picoseconds. - Weight::from_parts(266_148_573, 6739) - // Standard Error: 69 - .saturating_add(Weight::from_parts(34_592, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `804 + c * (1 ±0)` + // Estimated: `9217 + c * (1 ±0)` + // Minimum execution time: 294_976_000 picoseconds. + Weight::from_parts(277_235_948, 9217) + // Standard Error: 65 + .saturating_add(Weight::from_parts(34_445, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -377,17 +383,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `8737` - // Minimum execution time: 3_760_879_000 picoseconds. 
- Weight::from_parts(794_812_431, 8737) - // Standard Error: 149 - .saturating_add(Weight::from_parts(101_881, 0).saturating_mul(c.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_404, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_544, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `326` + // Estimated: `8740` + // Minimum execution time: 3_864_558_000 picoseconds. + Weight::from_parts(421_676_889, 8740) + // Standard Error: 188 + .saturating_add(Weight::from_parts(103_788, 0).saturating_mul(c.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_715, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_774, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -412,15 +418,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `560` - // Estimated: `6504` - // Minimum execution time: 1_953_162_000 picoseconds. - Weight::from_parts(374_252_840, 6504) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(i.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) + // Measured: `563` + // Estimated: `8982` + // Minimum execution time: 2_069_276_000 picoseconds. + Weight::from_parts(377_210_279, 8982) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_719, 0).saturating_mul(i.into())) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_728, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -439,11 +445,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `826` - // Estimated: `6766` - // Minimum execution time: 187_899_000 picoseconds. - Weight::from_parts(195_510_000, 6766) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `829` + // Estimated: `9244` + // Minimum execution time: 199_037_000 picoseconds. + Weight::from_parts(213_931_000, 9244) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -457,15 +463,39 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. - fn upload_code(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 254_800_000 picoseconds. 
- Weight::from_parts(285_603_050, 3607) - // Standard Error: 62 - .saturating_add(Weight::from_parts(66_212, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + fn upload_code_determinism_enforced(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 278_482_000 picoseconds. + Weight::from_parts(262_753_879, 6085) + // Standard Error: 112 + .saturating_add(Weight::from_parts(66_129, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1 w:1) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. + fn upload_code_determinism_relaxed(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 286_902_000 picoseconds. + Weight::from_parts(269_003_959, 6085) + // Standard Error: 123 + .saturating_add(Weight::from_parts(66_629, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -482,8 +512,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 43_553_000 picoseconds. - Weight::from_parts(45_036_000, 3780) + // Minimum execution time: 44_128_000 picoseconds. + Weight::from_parts(46_044_000, 3780) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -499,8 +529,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_223_000 picoseconds. - Weight::from_parts(34_385_000, 8967) + // Minimum execution time: 33_567_000 picoseconds. + Weight::from_parts(35_057_000, 8967) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -521,13 +551,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 ±0)` - // Estimated: `6806 + r * (6 ±0)` - // Minimum execution time: 254_213_000 picoseconds. 
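
For reading the generated formulas in this file: each function is an affine fit, `Weight::from_parts(intercept, proof_size)` plus a per-component slope, plus flat `DbWeight` read/write terms. A toy evaluation of `upload_code_determinism_enforced` using the fitted constants above and assumed per-operation DB costs (the real values come from `T::DbWeight`):

fn upload_code_weight(c: u64) -> (u64, u64) {
    let base = (262_753_879u64, 6_085u64); // intercept and proof size from the fit above
    let per_byte = 66_129u64; // slope for component `c`
    let db_read = 25_000_000u64; // assumed per-read ref_time, not a real constant
    let db_write = 100_000_000u64; // assumed per-write ref_time, not a real constant
    let ref_time = base.0 + per_byte * c + 6 * db_read + 4 * db_write;
    (ref_time, base.1)
}

fn main() {
    let (small, _) = upload_code_weight(0);
    let (large, _) = upload_code_weight(125_952);
    // The per-byte slope dominates for large code blobs.
    assert_eq!(large - small, 66_129 * 125_952);
}
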
- Weight::from_parts(273_464_980, 6806) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(322_619, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `869 + r * (6 ±0)` + // Estimated: `9284 + r * (6 ±0)` + // Minimum execution time: 272_376_000 picoseconds. + Weight::from_parts(284_162_161, 9284) + // Standard Error: 501 + .saturating_add(Weight::from_parts(341_575, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -548,13 +578,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `922 + r * (209 ±0)` - // Estimated: `6826 + r * (2684 ±0)` - // Minimum execution time: 250_273_000 picoseconds. - Weight::from_parts(122_072_782, 6826) - // Standard Error: 5_629 - .saturating_add(Weight::from_parts(3_490_256, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `925 + r * (209 ±0)` + // Estimated: `9304 + r * (2684 ±0)` + // Minimum execution time: 256_990_000 picoseconds. + Weight::from_parts(107_167_044, 9304) + // Standard Error: 7_545 + .saturating_add(Weight::from_parts(3_628_112, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2684).saturating_mul(r.into())) @@ -576,13 +606,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (213 ±0)` - // Estimated: `6830 + r * (2688 ±0)` - // Minimum execution time: 255_187_000 picoseconds. - Weight::from_parts(118_082_505, 6830) - // Standard Error: 6_302 - .saturating_add(Weight::from_parts(4_246_968, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (213 ±0)` + // Estimated: `9308 + r * (2688 ±0)` + // Minimum execution time: 272_889_000 picoseconds. + Weight::from_parts(110_341_799, 9308) + // Standard Error: 7_881 + .saturating_add(Weight::from_parts(4_427_043, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2688).saturating_mul(r.into())) @@ -604,13 +634,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (6 ±0)` - // Estimated: `6815 + r * (6 ±0)` - // Minimum execution time: 256_833_000 picoseconds. - Weight::from_parts(273_330_216, 6815) - // Standard Error: 881 - .saturating_add(Weight::from_parts(400_105, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `876 + r * (6 ±0)` + // Estimated: `9293 + r * (6 ±0)` + // Minimum execution time: 275_027_000 picoseconds. 
+ Weight::from_parts(283_302_223, 9293) + // Standard Error: 1_179 + .saturating_add(Weight::from_parts(436_023, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -631,13 +661,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 244_193_000 picoseconds. - Weight::from_parts(271_221_908, 6804) - // Standard Error: 442 - .saturating_add(Weight::from_parts(176_480, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (3 ±0)` + // Estimated: `9282 + r * (3 ±0)` + // Minimum execution time: 270_137_000 picoseconds. + Weight::from_parts(282_756_362, 9282) + // Standard Error: 384 + .saturating_add(Weight::from_parts(171_660, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -656,13 +686,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `753 + r * (3 ±0)` - // Estimated: `6693 + r * (3 ±0)` - // Minimum execution time: 232_603_000 picoseconds. - Weight::from_parts(260_577_368, 6693) - // Standard Error: 365 - .saturating_add(Weight::from_parts(158_126, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `756 + r * (3 ±0)` + // Estimated: `9171 + r * (3 ±0)` + // Minimum execution time: 255_131_000 picoseconds. + Weight::from_parts(269_207_006, 9171) + // Standard Error: 542 + .saturating_add(Weight::from_parts(161_597, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -683,13 +713,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `867 + r * (6 ±0)` - // Estimated: `6807 + r * (6 ±0)` - // Minimum execution time: 247_564_000 picoseconds. - Weight::from_parts(275_108_914, 6807) - // Standard Error: 505 - .saturating_add(Weight::from_parts(315_065, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `870 + r * (6 ±0)` + // Estimated: `9285 + r * (6 ±0)` + // Minimum execution time: 272_172_000 picoseconds. + Weight::from_parts(283_747_134, 9285) + // Standard Error: 885 + .saturating_add(Weight::from_parts(343_968, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -710,13 +740,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 ±0)` - // Estimated: `6806 + r * (6 ±0)` - // Minimum execution time: 258_799_000 picoseconds. 
- Weight::from_parts(274_338_256, 6806) - // Standard Error: 632 - .saturating_add(Weight::from_parts(355_032, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `9284 + r * (6 ±0)` + // Minimum execution time: 263_220_000 picoseconds. + Weight::from_parts(277_062_382, 9284) + // Standard Error: 1_783 + .saturating_add(Weight::from_parts(399_631, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -737,13 +767,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1007 + r * (6 ±0)` - // Estimated: `6931 + r * (6 ±0)` - // Minimum execution time: 253_335_000 picoseconds. - Weight::from_parts(273_013_859, 6931) - // Standard Error: 2_007 - .saturating_add(Weight::from_parts(1_540_735, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1010 + r * (6 ±0)` + // Estimated: `9409 + r * (6 ±0)` + // Minimum execution time: 253_218_000 picoseconds. + Weight::from_parts(282_251_452, 9409) + // Standard Error: 2_367 + .saturating_add(Weight::from_parts(1_604_060, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -764,13 +794,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (6 ±0)` - // Estimated: `6823 + r * (6 ±0)` - // Minimum execution time: 252_325_000 picoseconds. - Weight::from_parts(274_733_944, 6823) - // Standard Error: 603 - .saturating_add(Weight::from_parts(314_467, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `880 + r * (6 ±0)` + // Estimated: `9301 + r * (6 ±0)` + // Minimum execution time: 271_797_000 picoseconds. + Weight::from_parts(288_594_393, 9301) + // Standard Error: 846 + .saturating_add(Weight::from_parts(335_063, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -791,13 +821,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875 + r * (6 ±0)` - // Estimated: `6816 + r * (6 ±0)` - // Minimum execution time: 250_698_000 picoseconds. - Weight::from_parts(271_707_578, 6816) - // Standard Error: 952 - .saturating_add(Weight::from_parts(318_412, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `878 + r * (6 ±0)` + // Estimated: `9294 + r * (6 ±0)` + // Minimum execution time: 254_997_000 picoseconds. + Weight::from_parts(284_331_894, 9294) + // Standard Error: 885 + .saturating_add(Weight::from_parts(337_073, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -818,13 +848,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. 
fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872 + r * (6 ±0)` - // Estimated: `6819 + r * (6 ±0)` - // Minimum execution time: 251_854_000 picoseconds. - Weight::from_parts(272_002_212, 6819) - // Standard Error: 622 - .saturating_add(Weight::from_parts(313_353, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `875 + r * (6 ±0)` + // Estimated: `9297 + r * (6 ±0)` + // Minimum execution time: 273_845_000 picoseconds. + Weight::from_parts(285_291_242, 9297) + // Standard Error: 690 + .saturating_add(Weight::from_parts(332_783, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -845,13 +875,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 ±0)` - // Estimated: `6804 + r * (6 ±0)` - // Minimum execution time: 252_010_000 picoseconds. - Weight::from_parts(270_387_000, 6804) - // Standard Error: 659 - .saturating_add(Weight::from_parts(325_856, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `9282 + r * (6 ±0)` + // Minimum execution time: 268_302_000 picoseconds. + Weight::from_parts(280_463_257, 9282) + // Standard Error: 1_035 + .saturating_add(Weight::from_parts(345_811, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -874,13 +904,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `937 + r * (14 ±0)` - // Estimated: `6872 + r * (14 ±0)` - // Minimum execution time: 247_933_000 picoseconds. - Weight::from_parts(281_550_162, 6872) - // Standard Error: 660 - .saturating_add(Weight::from_parts(1_090_869, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `940 + r * (14 ±0)` + // Estimated: `9350 + r * (14 ±0)` + // Minimum execution time: 263_433_000 picoseconds. + Weight::from_parts(290_098_370, 9350) + // Standard Error: 1_905 + .saturating_add(Weight::from_parts(805_299, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) } @@ -901,13 +931,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (6 ±0)` - // Estimated: `6807 + r * (6 ±0)` - // Minimum execution time: 251_158_000 picoseconds. - Weight::from_parts(274_623_152, 6807) - // Standard Error: 491 - .saturating_add(Weight::from_parts(263_916, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `868 + r * (6 ±0)` + // Estimated: `9285 + r * (6 ±0)` + // Minimum execution time: 273_588_000 picoseconds. 
+ Weight::from_parts(287_133_565, 9285) + // Standard Error: 799 + .saturating_add(Weight::from_parts(254_114, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -928,13 +958,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869` - // Estimated: `6809` - // Minimum execution time: 263_205_000 picoseconds. - Weight::from_parts(216_792_893, 6809) - // Standard Error: 23 - .saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `872` + // Estimated: `9287` + // Minimum execution time: 257_843_000 picoseconds. + Weight::from_parts(228_177_495, 9287) + // Standard Error: 24 + .saturating_add(Weight::from_parts(985, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -954,11 +984,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `853 + r * (45 ±0)` - // Estimated: `6793 + r * (45 ±0)` - // Minimum execution time: 239_663_000 picoseconds. - Weight::from_parts(266_124_565, 6793) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `856 + r * (45 ±0)` + // Estimated: `9271 + r * (45 ±0)` + // Minimum execution time: 248_332_000 picoseconds. + Weight::from_parts(274_998_906, 9271) + // Standard Error: 949_561 + .saturating_add(Weight::from_parts(5_570_693, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } @@ -979,13 +1011,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863` - // Estimated: `6810` - // Minimum execution time: 241_763_000 picoseconds. - Weight::from_parts(266_535_552, 6810) - // Standard Error: 0 - .saturating_add(Weight::from_parts(320, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866` + // Estimated: `9288` + // Minimum execution time: 272_055_000 picoseconds. + Weight::from_parts(282_280_090, 9288) + // Standard Error: 1 + .saturating_add(Weight::from_parts(330, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -1011,14 +1043,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2972 + r * (316 ±0)` - // Estimated: `8912 + r * (5266 ±0)` - // Minimum execution time: 265_888_000 picoseconds. 
- Weight::from_parts(291_232_232, 8912)
- // Standard Error: 845_475
- .saturating_add(Weight::from_parts(104_398_867, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(8_u64))
- .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(r.into())))
+ // Measured: `2902 + r * (529 ±0)`
+ // Estimated: `11317 + r * (5479 ±0)`
+ // Minimum execution time: 274_842_000 picoseconds.
+ Weight::from_parts(299_824_497, 11317)
+ // Standard Error: 902_686
+ .saturating_add(Weight::from_parts(106_562_902, 0).saturating_mul(r.into()))
+ .saturating_add(T::DbWeight::get().reads(11_u64))
+ .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into())))
 .saturating_add(T::DbWeight::get().writes(3_u64))
 .saturating_add(T::DbWeight::get().writes((10_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 5266).saturating_mul(r.into()))
+ .saturating_add(Weight::from_parts(0, 5479).saturating_mul(r.into()))
@@ -1042,13 +1074,13 @@ impl WeightInfo for SubstrateWeight {
 /// The range of component `r` is `[0, 1600]`.
 fn seal_random(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `944 + r * (10 ±0)`
- // Estimated: `6885 + r * (10 ±0)`
- // Minimum execution time: 248_500_000 picoseconds.
- Weight::from_parts(282_353_053, 6885)
- // Standard Error: 1_144
- .saturating_add(Weight::from_parts(1_193_841, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(9_u64))
+ // Measured: `947 + r * (10 ±0)`
+ // Estimated: `9363 + r * (10 ±0)`
+ // Minimum execution time: 255_393_000 picoseconds.
+ Weight::from_parts(309_814_338, 9363)
+ // Standard Error: 2_978
+ .saturating_add(Weight::from_parts(1_203_507, 0).saturating_mul(r.into()))
+ .saturating_add(T::DbWeight::get().reads(12_u64))
 .saturating_add(T::DbWeight::get().writes(3_u64))
 .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into()))
 }
@@ -1069,13 +1101,13 @@ impl WeightInfo for SubstrateWeight {
 /// The range of component `r` is `[0, 1600]`.
 fn seal_deposit_event(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `863 + r * (10 ±0)`
- // Estimated: `6805 + r * (10 ±0)`
- // Minimum execution time: 248_130_000 picoseconds.
- Weight::from_parts(279_583_178, 6805)
- // Standard Error: 971
- .saturating_add(Weight::from_parts(1_987_941, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(8_u64))
+ // Measured: `866 + r * (10 ±0)`
+ // Estimated: `9283 + r * (10 ±0)`
+ // Minimum execution time: 250_378_000 picoseconds.
+ Weight::from_parts(287_003_144, 9283)
+ // Standard Error: 2_726
+ .saturating_add(Weight::from_parts(1_850_967, 0).saturating_mul(r.into()))
+ .saturating_add(T::DbWeight::get().reads(11_u64))
 .saturating_add(T::DbWeight::get().writes(3_u64))
 .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into()))
 }
@@ -1097,15 +1129,15 @@ impl WeightInfo for SubstrateWeight {
 /// The range of component `n` is `[0, 16384]`.
 fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `880 + t * (32 ±0)`
- // Estimated: `6825 + t * (2508 ±0)`
- // Minimum execution time: 258_594_000 picoseconds.
- Weight::from_parts(276_734_422, 6825)
- // Standard Error: 102_093
- .saturating_add(Weight::from_parts(2_559_383, 0).saturating_mul(t.into()))
- // Standard Error: 28
- .saturating_add(Weight::from_parts(501, 0).saturating_mul(n.into()))
- .saturating_add(T::DbWeight::get().reads(8_u64))
+ // Measured: `883 + t * (32 ±0)`
+ // Estimated: `9303 + t * (2508 ±0)`
+ // Minimum execution time: 266_787_000 picoseconds.
+ Weight::from_parts(284_368_661, 9303) + // Standard Error: 121_315 + .saturating_add(Weight::from_parts(2_690_211, 0).saturating_mul(t.into())) + // Standard Error: 33 + .saturating_add(Weight::from_parts(789, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -1128,13 +1160,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (7 ±0)` - // Estimated: `6807 + r * (7 ±0)` - // Minimum execution time: 154_564_000 picoseconds. - Weight::from_parts(168_931_365, 6807) - // Standard Error: 349 - .saturating_add(Weight::from_parts(226_848, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `865 + r * (7 ±0)` + // Estimated: `9285 + r * (7 ±0)` + // Minimum execution time: 166_804_000 picoseconds. + Weight::from_parts(175_118_291, 9285) + // Standard Error: 398 + .saturating_add(Weight::from_parts(220_961, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } @@ -1155,13 +1187,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125813` - // Estimated: `131755` - // Minimum execution time: 394_382_000 picoseconds. - Weight::from_parts(376_780_500, 131755) + // Measured: `125816` + // Estimated: `131758` + // Minimum execution time: 398_436_000 picoseconds. + Weight::from_parts(385_003_285, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_026, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_038, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1169,13 +1201,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (292 ±0)` - // Estimated: `926 + r * (293 ±0)` - // Minimum execution time: 249_757_000 picoseconds. - Weight::from_parts(177_324_374, 926) - // Standard Error: 9_512 - .saturating_add(Weight::from_parts(6_176_717, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `927 + r * (292 ±0)` + // Estimated: `929 + r * (293 ±0)` + // Minimum execution time: 274_099_000 picoseconds. + Weight::from_parts(174_282_817, 929) + // Standard Error: 10_547 + .saturating_add(Weight::from_parts(6_262_173, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1186,13 +1218,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. 
fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1447` - // Estimated: `1430` - // Minimum execution time: 267_564_000 picoseconds. - Weight::from_parts(328_701_080, 1430) - // Standard Error: 61 - .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Measured: `1450` + // Estimated: `1433` + // Minimum execution time: 274_061_000 picoseconds. + Weight::from_parts(334_755_333, 1433) + // Standard Error: 66 + .saturating_add(Weight::from_parts(771, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1200,13 +1232,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1253 + n * (1 ±0)` - // Estimated: `1253 + n * (1 ±0)` - // Minimum execution time: 266_347_000 picoseconds. - Weight::from_parts(289_824_718, 1253) - // Standard Error: 34 - .saturating_add(Weight::from_parts(184, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1256 + n * (1 ±0)` + // Estimated: `1256 + n * (1 ±0)` + // Minimum execution time: 272_657_000 picoseconds. + Weight::from_parts(299_061_594, 1256) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1215,13 +1245,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (288 ±0)` - // Estimated: `927 + r * (289 ±0)` - // Minimum execution time: 247_207_000 picoseconds. - Weight::from_parts(179_856_075, 927) - // Standard Error: 9_383 - .saturating_add(Weight::from_parts(6_053_198, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (288 ±0)` + // Estimated: `930 + r * (289 ±0)` + // Minimum execution time: 270_524_000 picoseconds. + Weight::from_parts(177_959_667, 930) + // Standard Error: 10_397 + .saturating_add(Weight::from_parts(6_270_181, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1232,13 +1262,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1249 + n * (1 ±0)` - // Estimated: `1249 + n * (1 ±0)` - // Minimum execution time: 262_655_000 picoseconds. - Weight::from_parts(289_482_543, 1249) - // Standard Error: 35 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1252 + n * (1 ±0)` + // Estimated: `1252 + n * (1 ±0)` + // Minimum execution time: 270_757_000 picoseconds. 
+ Weight::from_parts(298_627_896, 1252) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1247,13 +1275,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (296 ±0)` - // Estimated: `923 + r * (297 ±0)` - // Minimum execution time: 247_414_000 picoseconds. - Weight::from_parts(203_317_182, 923) - // Standard Error: 7_191 - .saturating_add(Weight::from_parts(4_925_154, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (296 ±0)` + // Estimated: `926 + r * (297 ±0)` + // Minimum execution time: 268_082_000 picoseconds. + Weight::from_parts(202_583_730, 926) + // Standard Error: 9_029 + .saturating_add(Weight::from_parts(5_028_517, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) @@ -1263,13 +1291,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1265 + n * (1 ±0)` - // Estimated: `1265 + n * (1 ±0)` - // Minimum execution time: 258_910_000 picoseconds. - Weight::from_parts(283_086_514, 1265) - // Standard Error: 39 - .saturating_add(Weight::from_parts(980, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1268 + n * (1 ±0)` + // Estimated: `1268 + n * (1 ±0)` + // Minimum execution time: 274_100_000 picoseconds. + Weight::from_parts(296_981_515, 1268) + // Standard Error: 34 + .saturating_add(Weight::from_parts(578, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1278,13 +1306,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `932 + r * (288 ±0)` - // Estimated: `929 + r * (289 ±0)` - // Minimum execution time: 252_410_000 picoseconds. - Weight::from_parts(201_227_879, 929) - // Standard Error: 6_899 - .saturating_add(Weight::from_parts(4_774_983, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `935 + r * (288 ±0)` + // Estimated: `932 + r * (289 ±0)` + // Minimum execution time: 269_697_000 picoseconds. + Weight::from_parts(198_346_516, 932) + // Standard Error: 8_623 + .saturating_add(Weight::from_parts(4_964_116, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) @@ -1294,13 +1322,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1252 + n * (1 ±0)` - // Estimated: `1252 + n * (1 ±0)` - // Minimum execution time: 259_053_000 picoseconds. 
- Weight::from_parts(283_392_084, 1252) - // Standard Error: 41 - .saturating_add(Weight::from_parts(213, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1255 + n * (1 ±0)` + // Estimated: `1255 + n * (1 ±0)` + // Minimum execution time: 264_210_000 picoseconds. + Weight::from_parts(291_387_652, 1255) + // Standard Error: 36 + .saturating_add(Weight::from_parts(524, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1309,13 +1337,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `914 + r * (296 ±0)` - // Estimated: `919 + r * (297 ±0)` - // Minimum execution time: 251_371_000 picoseconds. - Weight::from_parts(177_119_717, 919) - // Standard Error: 9_421 - .saturating_add(Weight::from_parts(6_226_005, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `917 + r * (296 ±0)` + // Estimated: `922 + r * (297 ±0)` + // Minimum execution time: 267_219_000 picoseconds. + Weight::from_parts(175_866_982, 922) + // Standard Error: 11_275 + .saturating_add(Weight::from_parts(6_424_755, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1326,13 +1354,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1266 + n * (1 ±0)` - // Estimated: `1266 + n * (1 ±0)` - // Minimum execution time: 263_350_000 picoseconds. - Weight::from_parts(284_323_917, 1266) - // Standard Error: 31 - .saturating_add(Weight::from_parts(921, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1269 + n * (1 ±0)` + // Estimated: `1269 + n * (1 ±0)` + // Minimum execution time: 274_634_000 picoseconds. + Weight::from_parts(294_272_009, 1269) + // Standard Error: 36 + .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1353,13 +1381,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1415 + r * (45 ±0)` - // Estimated: `7307 + r * (2520 ±0)` - // Minimum execution time: 248_701_000 picoseconds. - Weight::from_parts(17_811_969, 7307) - // Standard Error: 35_154 - .saturating_add(Weight::from_parts(31_809_738, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1418 + r * (45 ±0)` + // Estimated: `9785 + r * (2520 ±0)` + // Minimum execution time: 266_833_000 picoseconds. 
+ Weight::from_parts(186_049_741, 9785) + // Standard Error: 40_311 + .saturating_add(Weight::from_parts(31_365_585, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1382,13 +1410,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1260 + r * (245 ±0)` - // Estimated: `9440 + r * (2721 ±0)` - // Minimum execution time: 247_335_000 picoseconds. - Weight::from_parts(264_025_000, 9440) - // Standard Error: 121_299 - .saturating_add(Weight::from_parts(234_770_827, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `1263 + r * (245 ±0)` + // Estimated: `9635 + r * (2721 ±0)` + // Minimum execution time: 272_140_000 picoseconds. + Weight::from_parts(275_009_000, 9635) + // Standard Error: 99_187 + .saturating_add(Weight::from_parts(240_702_479, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -1412,12 +1440,12 @@ impl WeightInfo for SubstrateWeight { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (576 ±0)` - // Estimated: `6812 + r * (2637 ±3)` - // Minimum execution time: 261_011_000 picoseconds. - Weight::from_parts(264_554_000, 6812) - // Standard Error: 104_415 - .saturating_add(Weight::from_parts(231_627_084, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Estimated: `9290 + r * (2637 ±10)` + // Minimum execution time: 263_664_000 picoseconds. + Weight::from_parts(277_240_000, 9290) + // Standard Error: 169_147 + .saturating_add(Weight::from_parts(240_084_929, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1441,15 +1469,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1307 + t * (277 ±0)` - // Estimated: `12197 + t * (5227 ±0)` - // Minimum execution time: 445_561_000 picoseconds. - Weight::from_parts(62_287_490, 12197) - // Standard Error: 11_797_697 - .saturating_add(Weight::from_parts(357_530_529, 0).saturating_mul(t.into())) + // Measured: `1310 + t * (277 ±0)` + // Estimated: `12200 + t * (5227 ±0)` + // Minimum execution time: 464_644_000 picoseconds. 
+ Weight::from_parts(40_377_313, 12200) + // Standard Error: 12_060_226 + .saturating_add(Weight::from_parts(380_635_982, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(970, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(Weight::from_parts(1_014, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) @@ -1476,13 +1504,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1278 + r * (255 ±0)` - // Estimated: `9620 + r * (2731 ±0)` - // Minimum execution time: 621_897_000 picoseconds. - Weight::from_parts(631_687_000, 9620) - // Standard Error: 215_241 - .saturating_add(Weight::from_parts(350_527_831, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `1281 + r * (255 ±0)` + // Estimated: `9623 + r * (2731 ±0)` + // Minimum execution time: 641_845_000 picoseconds. + Weight::from_parts(649_908_000, 9623) + // Standard Error: 278_514 + .saturating_add(Weight::from_parts(361_164_598, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(7_u64)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) @@ -1511,17 +1539,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1303 + t * (104 ±0)` - // Estimated: `12211 + t * (2549 ±1)` - // Minimum execution time: 2_181_184_000 picoseconds. - Weight::from_parts(1_194_190_111, 12211) - // Standard Error: 11_578_766 - .saturating_add(Weight::from_parts(6_361_884, 0).saturating_mul(t.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_025, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_158, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(16_u64)) + // Measured: `1306 + t * (104 ±0)` + // Estimated: `12214 + t * (2549 ±1)` + // Minimum execution time: 2_111_632_000 picoseconds. + Weight::from_parts(1_213_125_745, 12214) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_055, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_192, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -1544,13 +1570,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (8 ±0)` - // Estimated: `6801 + r * (8 ±0)` - // Minimum execution time: 241_609_000 picoseconds. 
- Weight::from_parts(268_716_874, 6801) - // Standard Error: 617 - .saturating_add(Weight::from_parts(377_753, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `865 + r * (8 ±0)` + // Estimated: `9279 + r * (8 ±0)` + // Minimum execution time: 264_227_000 picoseconds. + Weight::from_parts(281_015_995, 9279) + // Standard Error: 710 + .saturating_add(Weight::from_parts(365_734, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1571,13 +1597,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `870` - // Estimated: `6808` - // Minimum execution time: 261_296_000 picoseconds. - Weight::from_parts(255_531_654, 6808) + // Measured: `873` + // Estimated: `9286` + // Minimum execution time: 270_709_000 picoseconds. + Weight::from_parts(268_416_051, 9286) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_081, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_080, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -1597,13 +1623,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6806 + r * (8 ±0)` - // Minimum execution time: 243_583_000 picoseconds. - Weight::from_parts(270_025_058, 6806) - // Standard Error: 560 - .saturating_add(Weight::from_parts(767_519, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9284 + r * (8 ±0)` + // Minimum execution time: 267_283_000 picoseconds. + Weight::from_parts(280_706_659, 9284) + // Standard Error: 540 + .saturating_add(Weight::from_parts(790_312, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1624,13 +1650,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6814` - // Minimum execution time: 253_798_000 picoseconds. - Weight::from_parts(265_542_351, 6814) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_343, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `875` + // Estimated: `9292` + // Minimum execution time: 262_010_000 picoseconds. + Weight::from_parts(273_278_744, 9292) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_362, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -1650,13 +1676,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. 
fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6808 + r * (8 ±0)` - // Minimum execution time: 247_332_000 picoseconds. - Weight::from_parts(269_183_656, 6808) - // Standard Error: 665 - .saturating_add(Weight::from_parts(443_386, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9286 + r * (8 ±0)` + // Minimum execution time: 268_698_000 picoseconds. + Weight::from_parts(282_890_578, 9286) + // Standard Error: 622 + .saturating_add(Weight::from_parts(441_131, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1677,13 +1703,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6813` - // Minimum execution time: 250_855_000 picoseconds. - Weight::from_parts(258_752_975, 6813) + // Measured: `875` + // Estimated: `9291` + // Minimum execution time: 252_846_000 picoseconds. + Weight::from_parts(267_657_561, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_208, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -1703,13 +1729,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6805 + r * (8 ±0)` - // Minimum execution time: 240_733_000 picoseconds. - Weight::from_parts(269_134_358, 6805) - // Standard Error: 512 - .saturating_add(Weight::from_parts(440_043, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9283 + r * (8 ±0)` + // Minimum execution time: 266_899_000 picoseconds. + Weight::from_parts(276_862_067, 9283) + // Standard Error: 1_249 + .saturating_add(Weight::from_parts(443_970, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1730,13 +1756,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6811` - // Minimum execution time: 247_377_000 picoseconds. - Weight::from_parts(261_077_322, 6811) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `875` + // Estimated: `9289` + // Minimum execution time: 265_413_000 picoseconds. 
+ Weight::from_parts(265_600_840, 9289) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_203, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -1756,13 +1782,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `997 + n * (1 ±0)` - // Estimated: `6934 + n * (1 ±0)` - // Minimum execution time: 307_337_000 picoseconds. - Weight::from_parts(326_710_473, 6934) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_765, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `1000 + n * (1 ±0)` + // Estimated: `9412 + n * (1 ±0)` + // Minimum execution time: 328_341_000 picoseconds. + Weight::from_parts(341_038_581, 9412) + // Standard Error: 10 + .saturating_add(Weight::from_parts(5_863, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1783,13 +1809,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `805 + r * (112 ±0)` - // Estimated: `6748 + r * (112 ±0)` - // Minimum execution time: 245_432_000 picoseconds. - Weight::from_parts(294_206_377, 6748) - // Standard Error: 7_229 - .saturating_add(Weight::from_parts(41_480_485, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `808 + r * (112 ±0)` + // Estimated: `9226 + r * (112 ±0)` + // Minimum execution time: 272_730_000 picoseconds. + Weight::from_parts(339_349_232, 9226) + // Standard Error: 13_004 + .saturating_add(Weight::from_parts(41_649_026, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) } @@ -1810,13 +1836,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `907 + r * (76 ±0)` - // Estimated: `6802 + r * (77 ±0)` - // Minimum execution time: 247_788_000 picoseconds. - Weight::from_parts(303_940_062, 6802) - // Standard Error: 10_671 - .saturating_add(Weight::from_parts(45_730_772, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `910 + r * (76 ±0)` + // Estimated: `9279 + r * (77 ±0)` + // Minimum execution time: 273_892_000 picoseconds. + Weight::from_parts(352_853_960, 9279) + // Standard Error: 16_745 + .saturating_add(Weight::from_parts(45_853_294, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } @@ -1837,13 +1863,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (42 ±0)` - // Estimated: `6816 + r * (42 ±0)` - // Minimum execution time: 248_825_000 picoseconds. 
- Weight::from_parts(286_832_225, 6816) - // Standard Error: 5_274 - .saturating_add(Weight::from_parts(11_889_262, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `880 + r * (42 ±0)` + // Estimated: `9294 + r * (42 ±0)` + // Minimum execution time: 268_431_000 picoseconds. + Weight::from_parts(319_727_796, 9294) + // Standard Error: 10_276 + .saturating_add(Weight::from_parts(11_928_882, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } @@ -1865,12 +1891,12 @@ impl WeightInfo for SubstrateWeight { fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (965 ±0)` - // Estimated: `6807 + r * (3090 ±7)` - // Minimum execution time: 244_982_000 picoseconds. - Weight::from_parts(265_297_000, 6807) - // Standard Error: 39_895 - .saturating_add(Weight::from_parts(22_435_888, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Estimated: `9285 + r * (3090 ±7)` + // Minimum execution time: 275_482_000 picoseconds. + Weight::from_parts(279_155_000, 9285) + // Standard Error: 65_388 + .saturating_add(Weight::from_parts(26_634_187, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -1891,15 +1917,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. - fn add_delegate_dependency(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `928 + r * (131 ±0)` - // Estimated: `6878 + r * (2606 ±0)` - // Minimum execution time: 246_455_000 picoseconds. - Weight::from_parts(275_334_919, 6878) - // Standard Error: 20_911 - .saturating_add(Weight::from_parts(6_427_525, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + fn lock_delegate_dependency(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `937 + r * (131 ±0)` + // Estimated: `9346 + r * (2607 ±0)` + // Minimum execution time: 270_001_000 picoseconds. + Weight::from_parts(286_792_689, 9346) + // Standard Error: 21_074 + .saturating_add(Weight::from_parts(6_465_885, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1920,15 +1946,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. - fn remove_delegate_dependency(r: u32, ) -> Weight { + fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `969 + r * (183 ±0)` // Estimated: `129453 + r * (2568 ±0)` - // Minimum execution time: 254_472_000 picoseconds. 
- Weight::from_parts(280_657_909, 129453) - // Standard Error: 20_131 - .saturating_add(Weight::from_parts(5_644_006, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 270_652_000 picoseconds. + Weight::from_parts(293_369_452, 129453) + // Standard Error: 24_321 + .saturating_add(Weight::from_parts(5_575_600, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1951,13 +1977,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `858 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 250_535_000 picoseconds. - Weight::from_parts(270_318_376, 6804) - // Standard Error: 386 - .saturating_add(Weight::from_parts(174_627, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `861 + r * (3 ±0)` + // Estimated: `9282 + r * (3 ±0)` + // Minimum execution time: 267_749_000 picoseconds. + Weight::from_parts(280_373_341, 9282) + // Standard Error: 464 + .saturating_add(Weight::from_parts(170_398, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -1978,13 +2004,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2109 + r * (39 ±0)` - // Estimated: `7899 + r * (40 ±0)` - // Minimum execution time: 248_174_000 picoseconds. - Weight::from_parts(301_826_520, 7899) - // Standard Error: 801 - .saturating_add(Weight::from_parts(248_479, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `2112 + r * (39 ±0)` + // Estimated: `10377 + r * (40 ±0)` + // Minimum execution time: 270_260_000 picoseconds. + Weight::from_parts(337_969_172, 10377) + // Standard Error: 1_557 + .saturating_add(Weight::from_parts(305_735, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } @@ -2007,13 +2033,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `861 + r * (3 ±0)` - // Estimated: `6801 + r * (3 ±0)` - // Minimum execution time: 246_540_000 picoseconds. - Weight::from_parts(268_913_509, 6801) - // Standard Error: 378 - .saturating_add(Weight::from_parts(154_950, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `864 + r * (3 ±0)` + // Estimated: `9279 + r * (3 ±0)` + // Minimum execution time: 265_607_000 picoseconds. 
+ Weight::from_parts(283_396_630, 9279) + // Standard Error: 449 + .saturating_add(Weight::from_parts(149_018, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -2022,10 +2048,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_777_000 picoseconds. - Weight::from_parts(1_707_601, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(15_392, 0).saturating_mul(r.into())) + // Minimum execution time: 1_813_000 picoseconds. + Weight::from_parts(1_264_945, 0) + // Standard Error: 19 + .saturating_add(Weight::from_parts(15_098, 0).saturating_mul(r.into())) } } @@ -2037,8 +2063,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_997_000 picoseconds. - Weight::from_parts(2_130_000, 1627) + // Minimum execution time: 2_054_000 picoseconds. + Weight::from_parts(2_178_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -2048,10 +2074,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_276_000 picoseconds. - Weight::from_parts(1_593_881, 442) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(1_109_302, 0).saturating_mul(k.into())) + // Minimum execution time: 12_119_000 picoseconds. + Weight::from_parts(12_536_000, 442) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(1_161_885, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -2065,10 +2091,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_176_000 picoseconds. - Weight::from_parts(8_555_388, 6149) + // Minimum execution time: 8_146_000 picoseconds. + Weight::from_parts(8_560_745, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_184, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_182, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -2081,8 +2107,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_270_000 picoseconds. - Weight::from_parts(16_779_000, 6450) + // Minimum execution time: 16_183_000 picoseconds. + Weight::from_parts(16_825_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2095,10 +2121,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_572_000 picoseconds. - Weight::from_parts(1_950_905, 3635) - // Standard Error: 1_597 - .saturating_add(Weight::from_parts(1_123_190, 0).saturating_mul(k.into())) + // Minimum execution time: 3_645_000 picoseconds. 
+ Weight::from_parts(604_365, 3635) + // Standard Error: 1_013 + .saturating_add(Weight::from_parts(1_100_277, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -2115,13 +2141,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325 + c * (1 ±0)` - // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 16_873_000 picoseconds. - Weight::from_parts(16_790_402, 6263) + // Measured: `328 + c * (1 ±0)` + // Estimated: `6266 + c * (1 ±0)` + // Minimum execution time: 19_500_000 picoseconds. + Weight::from_parts(20_074_895, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(396, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -2131,8 +2157,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 11_904_000 picoseconds. - Weight::from_parts(12_785_000, 6380) + // Minimum execution time: 12_530_000 picoseconds. + Weight::from_parts(13_362_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2146,8 +2172,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 44_920_000 picoseconds. - Weight::from_parts(46_163_000, 6292) + // Minimum execution time: 44_275_000 picoseconds. + Weight::from_parts(45_718_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2159,8 +2185,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 53_864_000 picoseconds. - Weight::from_parts(55_139_000, 6534) + // Minimum execution time: 55_095_000 picoseconds. + Weight::from_parts(58_009_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2170,8 +2196,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_375_000 picoseconds. - Weight::from_parts(2_487_000, 1627) + // Minimum execution time: 2_455_000 picoseconds. + Weight::from_parts(2_608_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2183,8 +2209,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_580_000 picoseconds. - Weight::from_parts(11_980_000, 3631) + // Minimum execution time: 11_207_000 picoseconds. + Weight::from_parts(11_802_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2194,8 +2220,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_557_000 picoseconds. - Weight::from_parts(4_807_000, 3607) + // Minimum execution time: 4_581_000 picoseconds. 
+ Weight::from_parts(4_781_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2206,8 +2232,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_253_000 picoseconds. - Weight::from_parts(6_479_000, 3632) + // Minimum execution time: 5_887_000 picoseconds. + Weight::from_parts(6_393_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2218,8 +2244,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_166_000 picoseconds. - Weight::from_parts(6_545_000, 3607) + // Minimum execution time: 6_025_000 picoseconds. + Weight::from_parts(6_298_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2240,13 +2266,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `801 + c * (1 ±0)` - // Estimated: `6739 + c * (1 ±0)` - // Minimum execution time: 282_232_000 picoseconds. - Weight::from_parts(266_148_573, 6739) - // Standard Error: 69 - .saturating_add(Weight::from_parts(34_592, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `804 + c * (1 ±0)` + // Estimated: `9217 + c * (1 ±0)` + // Minimum execution time: 294_976_000 picoseconds. + Weight::from_parts(277_235_948, 9217) + // Standard Error: 65 + .saturating_add(Weight::from_parts(34_445, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -2273,17 +2299,17 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `8737` - // Minimum execution time: 3_760_879_000 picoseconds. - Weight::from_parts(794_812_431, 8737) - // Standard Error: 149 - .saturating_add(Weight::from_parts(101_881, 0).saturating_mul(c.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_404, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_544, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `326` + // Estimated: `8740` + // Minimum execution time: 3_864_558_000 picoseconds. + Weight::from_parts(421_676_889, 8740) + // Standard Error: 188 + .saturating_add(Weight::from_parts(103_788, 0).saturating_mul(c.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_715, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_774, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2308,15 +2334,15 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `560` - // Estimated: `6504` - // Minimum execution time: 1_953_162_000 picoseconds. 
- Weight::from_parts(374_252_840, 6504) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(i.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) + // Measured: `563` + // Estimated: `8982` + // Minimum execution time: 2_069_276_000 picoseconds. + Weight::from_parts(377_210_279, 8982) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_719, 0).saturating_mul(i.into())) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_728, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2335,11 +2361,11 @@ impl WeightInfo for () { /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `826` - // Estimated: `6766` - // Minimum execution time: 187_899_000 picoseconds. - Weight::from_parts(195_510_000, 6766) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `829` + // Estimated: `9244` + // Minimum execution time: 199_037_000 picoseconds. + Weight::from_parts(213_931_000, 9244) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2353,15 +2379,39 @@ impl WeightInfo for () { /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. - fn upload_code(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 254_800_000 picoseconds. - Weight::from_parts(285_603_050, 3607) - // Standard Error: 62 - .saturating_add(Weight::from_parts(66_212, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + fn upload_code_determinism_enforced(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 278_482_000 picoseconds. + Weight::from_parts(262_753_879, 6085) + // Standard Error: 112 + .saturating_add(Weight::from_parts(66_129, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1 w:1) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. 
+ fn upload_code_determinism_relaxed(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 286_902_000 picoseconds. + Weight::from_parts(269_003_959, 6085) + // Standard Error: 123 + .saturating_add(Weight::from_parts(66_629, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2378,8 +2428,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 43_553_000 picoseconds. - Weight::from_parts(45_036_000, 3780) + // Minimum execution time: 44_128_000 picoseconds. + Weight::from_parts(46_044_000, 3780) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2395,8 +2445,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_223_000 picoseconds. - Weight::from_parts(34_385_000, 8967) + // Minimum execution time: 33_567_000 picoseconds. + Weight::from_parts(35_057_000, 8967) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -2417,13 +2467,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 ±0)` - // Estimated: `6806 + r * (6 ±0)` - // Minimum execution time: 254_213_000 picoseconds. - Weight::from_parts(273_464_980, 6806) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(322_619, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `869 + r * (6 ±0)` + // Estimated: `9284 + r * (6 ±0)` + // Minimum execution time: 272_376_000 picoseconds. + Weight::from_parts(284_162_161, 9284) + // Standard Error: 501 + .saturating_add(Weight::from_parts(341_575, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2444,13 +2494,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `922 + r * (209 ±0)` - // Estimated: `6826 + r * (2684 ±0)` - // Minimum execution time: 250_273_000 picoseconds. - Weight::from_parts(122_072_782, 6826) - // Standard Error: 5_629 - .saturating_add(Weight::from_parts(3_490_256, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `925 + r * (209 ±0)` + // Estimated: `9304 + r * (2684 ±0)` + // Minimum execution time: 256_990_000 picoseconds. + Weight::from_parts(107_167_044, 9304) + // Standard Error: 7_545 + .saturating_add(Weight::from_parts(3_628_112, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2684).saturating_mul(r.into())) @@ -2472,13 +2522,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (213 ±0)` - // Estimated: `6830 + r * (2688 ±0)` - // Minimum execution time: 255_187_000 picoseconds. 
- Weight::from_parts(118_082_505, 6830) - // Standard Error: 6_302 - .saturating_add(Weight::from_parts(4_246_968, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (213 ±0)` + // Estimated: `9308 + r * (2688 ±0)` + // Minimum execution time: 272_889_000 picoseconds. + Weight::from_parts(110_341_799, 9308) + // Standard Error: 7_881 + .saturating_add(Weight::from_parts(4_427_043, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2688).saturating_mul(r.into())) @@ -2500,13 +2550,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (6 ±0)` - // Estimated: `6815 + r * (6 ±0)` - // Minimum execution time: 256_833_000 picoseconds. - Weight::from_parts(273_330_216, 6815) - // Standard Error: 881 - .saturating_add(Weight::from_parts(400_105, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `876 + r * (6 ±0)` + // Estimated: `9293 + r * (6 ±0)` + // Minimum execution time: 275_027_000 picoseconds. + Weight::from_parts(283_302_223, 9293) + // Standard Error: 1_179 + .saturating_add(Weight::from_parts(436_023, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2527,13 +2577,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 244_193_000 picoseconds. - Weight::from_parts(271_221_908, 6804) - // Standard Error: 442 - .saturating_add(Weight::from_parts(176_480, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (3 ±0)` + // Estimated: `9282 + r * (3 ±0)` + // Minimum execution time: 270_137_000 picoseconds. + Weight::from_parts(282_756_362, 9282) + // Standard Error: 384 + .saturating_add(Weight::from_parts(171_660, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -2552,13 +2602,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `753 + r * (3 ±0)` - // Estimated: `6693 + r * (3 ±0)` - // Minimum execution time: 232_603_000 picoseconds. - Weight::from_parts(260_577_368, 6693) - // Standard Error: 365 - .saturating_add(Weight::from_parts(158_126, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `756 + r * (3 ±0)` + // Estimated: `9171 + r * (3 ±0)` + // Minimum execution time: 255_131_000 picoseconds. 
+ Weight::from_parts(269_207_006, 9171) + // Standard Error: 542 + .saturating_add(Weight::from_parts(161_597, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -2579,13 +2629,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `867 + r * (6 ±0)` - // Estimated: `6807 + r * (6 ±0)` - // Minimum execution time: 247_564_000 picoseconds. - Weight::from_parts(275_108_914, 6807) - // Standard Error: 505 - .saturating_add(Weight::from_parts(315_065, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `870 + r * (6 ±0)` + // Estimated: `9285 + r * (6 ±0)` + // Minimum execution time: 272_172_000 picoseconds. + Weight::from_parts(283_747_134, 9285) + // Standard Error: 885 + .saturating_add(Weight::from_parts(343_968, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2606,13 +2656,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 ±0)` - // Estimated: `6806 + r * (6 ±0)` - // Minimum execution time: 258_799_000 picoseconds. - Weight::from_parts(274_338_256, 6806) - // Standard Error: 632 - .saturating_add(Weight::from_parts(355_032, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `9284 + r * (6 ±0)` + // Minimum execution time: 263_220_000 picoseconds. + Weight::from_parts(277_062_382, 9284) + // Standard Error: 1_783 + .saturating_add(Weight::from_parts(399_631, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2633,13 +2683,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1007 + r * (6 ±0)` - // Estimated: `6931 + r * (6 ±0)` - // Minimum execution time: 253_335_000 picoseconds. - Weight::from_parts(273_013_859, 6931) - // Standard Error: 2_007 - .saturating_add(Weight::from_parts(1_540_735, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1010 + r * (6 ±0)` + // Estimated: `9409 + r * (6 ±0)` + // Minimum execution time: 253_218_000 picoseconds. + Weight::from_parts(282_251_452, 9409) + // Standard Error: 2_367 + .saturating_add(Weight::from_parts(1_604_060, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2660,13 +2710,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (6 ±0)` - // Estimated: `6823 + r * (6 ±0)` - // Minimum execution time: 252_325_000 picoseconds. 
- Weight::from_parts(274_733_944, 6823) - // Standard Error: 603 - .saturating_add(Weight::from_parts(314_467, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `880 + r * (6 ±0)` + // Estimated: `9301 + r * (6 ±0)` + // Minimum execution time: 271_797_000 picoseconds. + Weight::from_parts(288_594_393, 9301) + // Standard Error: 846 + .saturating_add(Weight::from_parts(335_063, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2687,13 +2737,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875 + r * (6 ±0)` - // Estimated: `6816 + r * (6 ±0)` - // Minimum execution time: 250_698_000 picoseconds. - Weight::from_parts(271_707_578, 6816) - // Standard Error: 952 - .saturating_add(Weight::from_parts(318_412, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `878 + r * (6 ±0)` + // Estimated: `9294 + r * (6 ±0)` + // Minimum execution time: 254_997_000 picoseconds. + Weight::from_parts(284_331_894, 9294) + // Standard Error: 885 + .saturating_add(Weight::from_parts(337_073, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2714,13 +2764,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872 + r * (6 ±0)` - // Estimated: `6819 + r * (6 ±0)` - // Minimum execution time: 251_854_000 picoseconds. - Weight::from_parts(272_002_212, 6819) - // Standard Error: 622 - .saturating_add(Weight::from_parts(313_353, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `875 + r * (6 ±0)` + // Estimated: `9297 + r * (6 ±0)` + // Minimum execution time: 273_845_000 picoseconds. + Weight::from_parts(285_291_242, 9297) + // Standard Error: 690 + .saturating_add(Weight::from_parts(332_783, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2741,13 +2791,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 ±0)` - // Estimated: `6804 + r * (6 ±0)` - // Minimum execution time: 252_010_000 picoseconds. - Weight::from_parts(270_387_000, 6804) - // Standard Error: 659 - .saturating_add(Weight::from_parts(325_856, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `9282 + r * (6 ±0)` + // Minimum execution time: 268_302_000 picoseconds. + Weight::from_parts(280_463_257, 9282) + // Standard Error: 1_035 + .saturating_add(Weight::from_parts(345_811, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2770,13 +2820,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. 
fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `937 + r * (14 ±0)` - // Estimated: `6872 + r * (14 ±0)` - // Minimum execution time: 247_933_000 picoseconds. - Weight::from_parts(281_550_162, 6872) - // Standard Error: 660 - .saturating_add(Weight::from_parts(1_090_869, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `940 + r * (14 ±0)` + // Estimated: `9350 + r * (14 ±0)` + // Minimum execution time: 263_433_000 picoseconds. + Weight::from_parts(290_098_370, 9350) + // Standard Error: 1_905 + .saturating_add(Weight::from_parts(805_299, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) } @@ -2797,13 +2847,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (6 ±0)` - // Estimated: `6807 + r * (6 ±0)` - // Minimum execution time: 251_158_000 picoseconds. - Weight::from_parts(274_623_152, 6807) - // Standard Error: 491 - .saturating_add(Weight::from_parts(263_916, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `868 + r * (6 ±0)` + // Estimated: `9285 + r * (6 ±0)` + // Minimum execution time: 273_588_000 picoseconds. + Weight::from_parts(287_133_565, 9285) + // Standard Error: 799 + .saturating_add(Weight::from_parts(254_114, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2824,13 +2874,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869` - // Estimated: `6809` - // Minimum execution time: 263_205_000 picoseconds. - Weight::from_parts(216_792_893, 6809) - // Standard Error: 23 - .saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `872` + // Estimated: `9287` + // Minimum execution time: 257_843_000 picoseconds. + Weight::from_parts(228_177_495, 9287) + // Standard Error: 24 + .saturating_add(Weight::from_parts(985, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2850,11 +2900,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `853 + r * (45 ±0)` - // Estimated: `6793 + r * (45 ±0)` - // Minimum execution time: 239_663_000 picoseconds. - Weight::from_parts(266_124_565, 6793) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `856 + r * (45 ±0)` + // Estimated: `9271 + r * (45 ±0)` + // Minimum execution time: 248_332_000 picoseconds. + Weight::from_parts(274_998_906, 9271) + // Standard Error: 949_561 + .saturating_add(Weight::from_parts(5_570_693, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } @@ -2875,13 +2927,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. 
fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863` - // Estimated: `6810` - // Minimum execution time: 241_763_000 picoseconds. - Weight::from_parts(266_535_552, 6810) - // Standard Error: 0 - .saturating_add(Weight::from_parts(320, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866` + // Estimated: `9288` + // Minimum execution time: 272_055_000 picoseconds. + Weight::from_parts(282_280_090, 9288) + // Standard Error: 1 + .saturating_add(Weight::from_parts(330, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2907,14 +2959,14 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2972 + r * (316 ±0)` - // Estimated: `8912 + r * (5266 ±0)` - // Minimum execution time: 265_888_000 picoseconds. - Weight::from_parts(291_232_232, 8912) - // Standard Error: 845_475 - .saturating_add(Weight::from_parts(104_398_867, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(r.into()))) + // Measured: `2902 + r * (529 ±0)` + // Estimated: `11317 + r * (5479 ±0)` + // Minimum execution time: 274_842_000 picoseconds. + Weight::from_parts(299_824_497, 11317) + // Standard Error: 902_686 + .saturating_add(Weight::from_parts(106_562_902, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((10_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 5266).saturating_mul(r.into())) @@ -2938,13 +2990,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `944 + r * (10 ±0)` - // Estimated: `6885 + r * (10 ±0)` - // Minimum execution time: 248_500_000 picoseconds. - Weight::from_parts(282_353_053, 6885) - // Standard Error: 1_144 - .saturating_add(Weight::from_parts(1_193_841, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `947 + r * (10 ±0)` + // Estimated: `9363 + r * (10 ±0)` + // Minimum execution time: 255_393_000 picoseconds. + Weight::from_parts(309_814_338, 9363) + // Standard Error: 2_978 + .saturating_add(Weight::from_parts(1_203_507, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -2965,13 +3017,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (10 ±0)` - // Estimated: `6805 + r * (10 ±0)` - // Minimum execution time: 248_130_000 picoseconds. - Weight::from_parts(279_583_178, 6805) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_987_941, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (10 ±0)` + // Estimated: `9283 + r * (10 ±0)` + // Minimum execution time: 250_378_000 picoseconds. 
+ Weight::from_parts(287_003_144, 9283) + // Standard Error: 2_726 + .saturating_add(Weight::from_parts(1_850_967, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -2993,15 +3045,15 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `880 + t * (32 ±0)` - // Estimated: `6825 + t * (2508 ±0)` - // Minimum execution time: 258_594_000 picoseconds. - Weight::from_parts(276_734_422, 6825) - // Standard Error: 102_093 - .saturating_add(Weight::from_parts(2_559_383, 0).saturating_mul(t.into())) - // Standard Error: 28 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `883 + t * (32 ±0)` + // Estimated: `9303 + t * (2508 ±0)` + // Minimum execution time: 266_787_000 picoseconds. + Weight::from_parts(284_368_661, 9303) + // Standard Error: 121_315 + .saturating_add(Weight::from_parts(2_690_211, 0).saturating_mul(t.into())) + // Standard Error: 33 + .saturating_add(Weight::from_parts(789, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -3024,13 +3076,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (7 ±0)` - // Estimated: `6807 + r * (7 ±0)` - // Minimum execution time: 154_564_000 picoseconds. - Weight::from_parts(168_931_365, 6807) - // Standard Error: 349 - .saturating_add(Weight::from_parts(226_848, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `865 + r * (7 ±0)` + // Estimated: `9285 + r * (7 ±0)` + // Minimum execution time: 166_804_000 picoseconds. + Weight::from_parts(175_118_291, 9285) + // Standard Error: 398 + .saturating_add(Weight::from_parts(220_961, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } @@ -3051,13 +3103,13 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125813` - // Estimated: `131755` - // Minimum execution time: 394_382_000 picoseconds. - Weight::from_parts(376_780_500, 131755) + // Measured: `125816` + // Estimated: `131758` + // Minimum execution time: 398_436_000 picoseconds. + Weight::from_parts(385_003_285, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_026, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_038, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3065,13 +3117,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. 
fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (292 ±0)` - // Estimated: `926 + r * (293 ±0)` - // Minimum execution time: 249_757_000 picoseconds. - Weight::from_parts(177_324_374, 926) - // Standard Error: 9_512 - .saturating_add(Weight::from_parts(6_176_717, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `927 + r * (292 ±0)` + // Estimated: `929 + r * (293 ±0)` + // Minimum execution time: 274_099_000 picoseconds. + Weight::from_parts(174_282_817, 929) + // Standard Error: 10_547 + .saturating_add(Weight::from_parts(6_262_173, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3082,13 +3134,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1447` - // Estimated: `1430` - // Minimum execution time: 267_564_000 picoseconds. - Weight::from_parts(328_701_080, 1430) - // Standard Error: 61 - .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Measured: `1450` + // Estimated: `1433` + // Minimum execution time: 274_061_000 picoseconds. + Weight::from_parts(334_755_333, 1433) + // Standard Error: 66 + .saturating_add(Weight::from_parts(771, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3096,13 +3148,11 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1253 + n * (1 ±0)` - // Estimated: `1253 + n * (1 ±0)` - // Minimum execution time: 266_347_000 picoseconds. - Weight::from_parts(289_824_718, 1253) - // Standard Error: 34 - .saturating_add(Weight::from_parts(184, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1256 + n * (1 ±0)` + // Estimated: `1256 + n * (1 ±0)` + // Minimum execution time: 272_657_000 picoseconds. + Weight::from_parts(299_061_594, 1256) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3111,13 +3161,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (288 ±0)` - // Estimated: `927 + r * (289 ±0)` - // Minimum execution time: 247_207_000 picoseconds. - Weight::from_parts(179_856_075, 927) - // Standard Error: 9_383 - .saturating_add(Weight::from_parts(6_053_198, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (288 ±0)` + // Estimated: `930 + r * (289 ±0)` + // Minimum execution time: 270_524_000 picoseconds. 
+ Weight::from_parts(177_959_667, 930) + // Standard Error: 10_397 + .saturating_add(Weight::from_parts(6_270_181, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3128,13 +3178,11 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1249 + n * (1 ±0)` - // Estimated: `1249 + n * (1 ±0)` - // Minimum execution time: 262_655_000 picoseconds. - Weight::from_parts(289_482_543, 1249) - // Standard Error: 35 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1252 + n * (1 ±0)` + // Estimated: `1252 + n * (1 ±0)` + // Minimum execution time: 270_757_000 picoseconds. + Weight::from_parts(298_627_896, 1252) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3143,13 +3191,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (296 ±0)` - // Estimated: `923 + r * (297 ±0)` - // Minimum execution time: 247_414_000 picoseconds. - Weight::from_parts(203_317_182, 923) - // Standard Error: 7_191 - .saturating_add(Weight::from_parts(4_925_154, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (296 ±0)` + // Estimated: `926 + r * (297 ±0)` + // Minimum execution time: 268_082_000 picoseconds. + Weight::from_parts(202_583_730, 926) + // Standard Error: 9_029 + .saturating_add(Weight::from_parts(5_028_517, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) @@ -3159,13 +3207,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1265 + n * (1 ±0)` - // Estimated: `1265 + n * (1 ±0)` - // Minimum execution time: 258_910_000 picoseconds. - Weight::from_parts(283_086_514, 1265) - // Standard Error: 39 - .saturating_add(Weight::from_parts(980, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1268 + n * (1 ±0)` + // Estimated: `1268 + n * (1 ±0)` + // Minimum execution time: 274_100_000 picoseconds. + Weight::from_parts(296_981_515, 1268) + // Standard Error: 34 + .saturating_add(Weight::from_parts(578, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3174,13 +3222,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `932 + r * (288 ±0)` - // Estimated: `929 + r * (289 ±0)` - // Minimum execution time: 252_410_000 picoseconds. 
- Weight::from_parts(201_227_879, 929) - // Standard Error: 6_899 - .saturating_add(Weight::from_parts(4_774_983, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `935 + r * (288 ±0)` + // Estimated: `932 + r * (289 ±0)` + // Minimum execution time: 269_697_000 picoseconds. + Weight::from_parts(198_346_516, 932) + // Standard Error: 8_623 + .saturating_add(Weight::from_parts(4_964_116, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) @@ -3190,13 +3238,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1252 + n * (1 ±0)` - // Estimated: `1252 + n * (1 ±0)` - // Minimum execution time: 259_053_000 picoseconds. - Weight::from_parts(283_392_084, 1252) - // Standard Error: 41 - .saturating_add(Weight::from_parts(213, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1255 + n * (1 ±0)` + // Estimated: `1255 + n * (1 ±0)` + // Minimum execution time: 264_210_000 picoseconds. + Weight::from_parts(291_387_652, 1255) + // Standard Error: 36 + .saturating_add(Weight::from_parts(524, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3205,13 +3253,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `914 + r * (296 ±0)` - // Estimated: `919 + r * (297 ±0)` - // Minimum execution time: 251_371_000 picoseconds. - Weight::from_parts(177_119_717, 919) - // Standard Error: 9_421 - .saturating_add(Weight::from_parts(6_226_005, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `917 + r * (296 ±0)` + // Estimated: `922 + r * (297 ±0)` + // Minimum execution time: 267_219_000 picoseconds. + Weight::from_parts(175_866_982, 922) + // Standard Error: 11_275 + .saturating_add(Weight::from_parts(6_424_755, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3222,13 +3270,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1266 + n * (1 ±0)` - // Estimated: `1266 + n * (1 ±0)` - // Minimum execution time: 263_350_000 picoseconds. - Weight::from_parts(284_323_917, 1266) - // Standard Error: 31 - .saturating_add(Weight::from_parts(921, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1269 + n * (1 ±0)` + // Estimated: `1269 + n * (1 ±0)` + // Minimum execution time: 274_634_000 picoseconds. 
+ Weight::from_parts(294_272_009, 1269) + // Standard Error: 36 + .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3249,13 +3297,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1415 + r * (45 ±0)` - // Estimated: `7307 + r * (2520 ±0)` - // Minimum execution time: 248_701_000 picoseconds. - Weight::from_parts(17_811_969, 7307) - // Standard Error: 35_154 - .saturating_add(Weight::from_parts(31_809_738, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1418 + r * (45 ±0)` + // Estimated: `9785 + r * (2520 ±0)` + // Minimum execution time: 266_833_000 picoseconds. + Weight::from_parts(186_049_741, 9785) + // Standard Error: 40_311 + .saturating_add(Weight::from_parts(31_365_585, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3278,13 +3326,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1260 + r * (245 ±0)` - // Estimated: `9440 + r * (2721 ±0)` - // Minimum execution time: 247_335_000 picoseconds. - Weight::from_parts(264_025_000, 9440) - // Standard Error: 121_299 - .saturating_add(Weight::from_parts(234_770_827, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `1263 + r * (245 ±0)` + // Estimated: `9635 + r * (2721 ±0)` + // Minimum execution time: 272_140_000 picoseconds. + Weight::from_parts(275_009_000, 9635) + // Standard Error: 99_187 + .saturating_add(Weight::from_parts(240_702_479, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -3308,12 +3356,12 @@ impl WeightInfo for () { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (576 ±0)` - // Estimated: `6812 + r * (2637 ±3)` - // Minimum execution time: 261_011_000 picoseconds. - Weight::from_parts(264_554_000, 6812) - // Standard Error: 104_415 - .saturating_add(Weight::from_parts(231_627_084, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Estimated: `9290 + r * (2637 ±10)` + // Minimum execution time: 263_664_000 picoseconds. + Weight::from_parts(277_240_000, 9290) + // Standard Error: 169_147 + .saturating_add(Weight::from_parts(240_084_929, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3337,15 +3385,15 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1048576]`. 
fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1307 + t * (277 ±0)` - // Estimated: `12197 + t * (5227 ±0)` - // Minimum execution time: 445_561_000 picoseconds. - Weight::from_parts(62_287_490, 12197) - // Standard Error: 11_797_697 - .saturating_add(Weight::from_parts(357_530_529, 0).saturating_mul(t.into())) + // Measured: `1310 + t * (277 ±0)` + // Estimated: `12200 + t * (5227 ±0)` + // Minimum execution time: 464_644_000 picoseconds. + Weight::from_parts(40_377_313, 12200) + // Standard Error: 12_060_226 + .saturating_add(Weight::from_parts(380_635_982, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(970, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(Weight::from_parts(1_014, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(16_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into()))) @@ -3372,13 +3420,13 @@ impl WeightInfo for () { /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1278 + r * (255 ±0)` - // Estimated: `9620 + r * (2731 ±0)` - // Minimum execution time: 621_897_000 picoseconds. - Weight::from_parts(631_687_000, 9620) - // Standard Error: 215_241 - .saturating_add(Weight::from_parts(350_527_831, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `1281 + r * (255 ±0)` + // Estimated: `9623 + r * (2731 ±0)` + // Minimum execution time: 641_845_000 picoseconds. + Weight::from_parts(649_908_000, 9623) + // Standard Error: 278_514 + .saturating_add(Weight::from_parts(361_164_598, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(7_u64)) .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(r.into()))) @@ -3407,17 +3455,15 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1303 + t * (104 ±0)` - // Estimated: `12211 + t * (2549 ±1)` - // Minimum execution time: 2_181_184_000 picoseconds. - Weight::from_parts(1_194_190_111, 12211) - // Standard Error: 11_578_766 - .saturating_add(Weight::from_parts(6_361_884, 0).saturating_mul(t.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_025, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_158, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(16_u64)) + // Measured: `1306 + t * (104 ±0)` + // Estimated: `12214 + t * (2549 ±1)` + // Minimum execution time: 2_111_632_000 picoseconds. 
+ Weight::from_parts(1_213_125_745, 12214) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_055, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_192, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -3440,13 +3486,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (8 ±0)` - // Estimated: `6801 + r * (8 ±0)` - // Minimum execution time: 241_609_000 picoseconds. - Weight::from_parts(268_716_874, 6801) - // Standard Error: 617 - .saturating_add(Weight::from_parts(377_753, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `865 + r * (8 ±0)` + // Estimated: `9279 + r * (8 ±0)` + // Minimum execution time: 264_227_000 picoseconds. + Weight::from_parts(281_015_995, 9279) + // Standard Error: 710 + .saturating_add(Weight::from_parts(365_734, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3467,13 +3513,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `870` - // Estimated: `6808` - // Minimum execution time: 261_296_000 picoseconds. - Weight::from_parts(255_531_654, 6808) + // Measured: `873` + // Estimated: `9286` + // Minimum execution time: 270_709_000 picoseconds. + Weight::from_parts(268_416_051, 9286) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_081, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_080, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -3493,13 +3539,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6806 + r * (8 ±0)` - // Minimum execution time: 243_583_000 picoseconds. - Weight::from_parts(270_025_058, 6806) - // Standard Error: 560 - .saturating_add(Weight::from_parts(767_519, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9284 + r * (8 ±0)` + // Minimum execution time: 267_283_000 picoseconds. + Weight::from_parts(280_706_659, 9284) + // Standard Error: 540 + .saturating_add(Weight::from_parts(790_312, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3520,13 +3566,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6814` - // Minimum execution time: 253_798_000 picoseconds. 
- Weight::from_parts(265_542_351, 6814) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_343, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `875` + // Estimated: `9292` + // Minimum execution time: 262_010_000 picoseconds. + Weight::from_parts(273_278_744, 9292) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_362, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -3546,13 +3592,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6808 + r * (8 ±0)` - // Minimum execution time: 247_332_000 picoseconds. - Weight::from_parts(269_183_656, 6808) - // Standard Error: 665 - .saturating_add(Weight::from_parts(443_386, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9286 + r * (8 ±0)` + // Minimum execution time: 268_698_000 picoseconds. + Weight::from_parts(282_890_578, 9286) + // Standard Error: 622 + .saturating_add(Weight::from_parts(441_131, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3573,13 +3619,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6813` - // Minimum execution time: 250_855_000 picoseconds. - Weight::from_parts(258_752_975, 6813) + // Measured: `875` + // Estimated: `9291` + // Minimum execution time: 252_846_000 picoseconds. + Weight::from_parts(267_657_561, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_208, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -3599,13 +3645,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6805 + r * (8 ±0)` - // Minimum execution time: 240_733_000 picoseconds. - Weight::from_parts(269_134_358, 6805) - // Standard Error: 512 - .saturating_add(Weight::from_parts(440_043, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9283 + r * (8 ±0)` + // Minimum execution time: 266_899_000 picoseconds. + Weight::from_parts(276_862_067, 9283) + // Standard Error: 1_249 + .saturating_add(Weight::from_parts(443_970, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3626,13 +3672,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. 
fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6811` - // Minimum execution time: 247_377_000 picoseconds. - Weight::from_parts(261_077_322, 6811) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `875` + // Estimated: `9289` + // Minimum execution time: 265_413_000 picoseconds. + Weight::from_parts(265_600_840, 9289) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_203, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -3652,13 +3698,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `997 + n * (1 ±0)` - // Estimated: `6934 + n * (1 ±0)` - // Minimum execution time: 307_337_000 picoseconds. - Weight::from_parts(326_710_473, 6934) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_765, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `1000 + n * (1 ±0)` + // Estimated: `9412 + n * (1 ±0)` + // Minimum execution time: 328_341_000 picoseconds. + Weight::from_parts(341_038_581, 9412) + // Standard Error: 10 + .saturating_add(Weight::from_parts(5_863, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3679,13 +3725,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `805 + r * (112 ±0)` - // Estimated: `6748 + r * (112 ±0)` - // Minimum execution time: 245_432_000 picoseconds. - Weight::from_parts(294_206_377, 6748) - // Standard Error: 7_229 - .saturating_add(Weight::from_parts(41_480_485, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `808 + r * (112 ±0)` + // Estimated: `9226 + r * (112 ±0)` + // Minimum execution time: 272_730_000 picoseconds. + Weight::from_parts(339_349_232, 9226) + // Standard Error: 13_004 + .saturating_add(Weight::from_parts(41_649_026, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) } @@ -3706,13 +3752,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `907 + r * (76 ±0)` - // Estimated: `6802 + r * (77 ±0)` - // Minimum execution time: 247_788_000 picoseconds. - Weight::from_parts(303_940_062, 6802) - // Standard Error: 10_671 - .saturating_add(Weight::from_parts(45_730_772, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `910 + r * (76 ±0)` + // Estimated: `9279 + r * (77 ±0)` + // Minimum execution time: 273_892_000 picoseconds. 
+ Weight::from_parts(352_853_960, 9279) + // Standard Error: 16_745 + .saturating_add(Weight::from_parts(45_853_294, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } @@ -3733,13 +3779,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (42 ±0)` - // Estimated: `6816 + r * (42 ±0)` - // Minimum execution time: 248_825_000 picoseconds. - Weight::from_parts(286_832_225, 6816) - // Standard Error: 5_274 - .saturating_add(Weight::from_parts(11_889_262, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `880 + r * (42 ±0)` + // Estimated: `9294 + r * (42 ±0)` + // Minimum execution time: 268_431_000 picoseconds. + Weight::from_parts(319_727_796, 9294) + // Standard Error: 10_276 + .saturating_add(Weight::from_parts(11_928_882, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } @@ -3761,12 +3807,12 @@ impl WeightInfo for () { fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (965 ±0)` - // Estimated: `6807 + r * (3090 ±7)` - // Minimum execution time: 244_982_000 picoseconds. - Weight::from_parts(265_297_000, 6807) - // Standard Error: 39_895 - .saturating_add(Weight::from_parts(22_435_888, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Estimated: `9285 + r * (3090 ±7)` + // Minimum execution time: 275_482_000 picoseconds. + Weight::from_parts(279_155_000, 9285) + // Standard Error: 65_388 + .saturating_add(Weight::from_parts(26_634_187, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -3787,15 +3833,15 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. - fn add_delegate_dependency(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `928 + r * (131 ±0)` - // Estimated: `6878 + r * (2606 ±0)` - // Minimum execution time: 246_455_000 picoseconds. - Weight::from_parts(275_334_919, 6878) - // Standard Error: 20_911 - .saturating_add(Weight::from_parts(6_427_525, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + fn lock_delegate_dependency(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `937 + r * (131 ±0)` + // Estimated: `9346 + r * (2607 ±0)` + // Minimum execution time: 270_001_000 picoseconds. 
+ Weight::from_parts(286_792_689, 9346) + // Standard Error: 21_074 + .saturating_add(Weight::from_parts(6_465_885, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3816,15 +3862,15 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. - fn remove_delegate_dependency(r: u32, ) -> Weight { + fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `969 + r * (183 ±0)` // Estimated: `129453 + r * (2568 ±0)` - // Minimum execution time: 254_472_000 picoseconds. - Weight::from_parts(280_657_909, 129453) - // Standard Error: 20_131 - .saturating_add(Weight::from_parts(5_644_006, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 270_652_000 picoseconds. + Weight::from_parts(293_369_452, 129453) + // Standard Error: 24_321 + .saturating_add(Weight::from_parts(5_575_600, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3847,13 +3893,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `858 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 250_535_000 picoseconds. - Weight::from_parts(270_318_376, 6804) - // Standard Error: 386 - .saturating_add(Weight::from_parts(174_627, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `861 + r * (3 ±0)` + // Estimated: `9282 + r * (3 ±0)` + // Minimum execution time: 267_749_000 picoseconds. + Weight::from_parts(280_373_341, 9282) + // Standard Error: 464 + .saturating_add(Weight::from_parts(170_398, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -3874,13 +3920,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2109 + r * (39 ±0)` - // Estimated: `7899 + r * (40 ±0)` - // Minimum execution time: 248_174_000 picoseconds. - Weight::from_parts(301_826_520, 7899) - // Standard Error: 801 - .saturating_add(Weight::from_parts(248_479, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `2112 + r * (39 ±0)` + // Estimated: `10377 + r * (40 ±0)` + // Minimum execution time: 270_260_000 picoseconds. + Weight::from_parts(337_969_172, 10377) + // Standard Error: 1_557 + .saturating_add(Weight::from_parts(305_735, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } @@ -3903,13 +3949,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. 
fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `861 + r * (3 ±0)` - // Estimated: `6801 + r * (3 ±0)` - // Minimum execution time: 246_540_000 picoseconds. - Weight::from_parts(268_913_509, 6801) - // Standard Error: 378 - .saturating_add(Weight::from_parts(154_950, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `864 + r * (3 ±0)` + // Estimated: `9279 + r * (3 ±0)` + // Minimum execution time: 265_607_000 picoseconds. + Weight::from_parts(283_396_630, 9279) + // Standard Error: 449 + .saturating_add(Weight::from_parts(149_018, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -3918,9 +3964,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_777_000 picoseconds. - Weight::from_parts(1_707_601, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(15_392, 0).saturating_mul(r.into())) + // Minimum execution time: 1_813_000 picoseconds. + Weight::from_parts(1_264_945, 0) + // Standard Error: 19 + .saturating_add(Weight::from_parts(15_098, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index 7894dc3a210ae7cf4ce3e847bfcc3fcd049e9be1..04f58895ab4f6ccebb6452ebeec20038ce7f0d78 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -93,7 +93,7 @@ pub trait HostFn { /// - `output`: A reference to the output data buffer to write the address. fn address(output: &mut &mut [u8]); - /// Adds a new delegate dependency to the contract. + /// Lock a new delegate dependency to the contract. /// /// Traps if the maximum number of delegate_dependencies is reached or if /// the delegate dependency already exists. @@ -102,10 +102,7 @@ pub trait HostFn { /// /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps /// otherwise. - #[deprecated( - note = "Unstable function. Behaviour can change without further notice. Use only for testing." - )] - fn add_delegate_dependency(code_hash: &[u8]); + fn lock_delegate_dependency(code_hash: &[u8]); /// Stores the *free* balance of the current account into the supplied buffer. /// @@ -142,6 +139,7 @@ pub trait HostFn { /// /// Equivalent to the newer [`Self::call_v2`] version but works with /// *ref_time* Weight only + #[deprecated(note = "Deprecated, use newer version instead")] fn call_v1( flags: CallFlags, callee: &[u8], @@ -178,14 +176,11 @@ pub trait HostFn { /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] /// - [NotCallable][`crate::ReturnErrorCode::NotCallable] - #[deprecated( - note = "Unstable function. Behaviour can change without further notice. Use only for testing." - )] fn call_v2( flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], @@ -480,6 +475,7 @@ pub trait HostFn { /// /// Equivalent to the newer [`Self::instantiate_v2`] version but works /// with *ref_time* Weight only. 
+ #[deprecated(note = "Deprecated, use newer version instead")] fn instantiate_v1( code_hash: &[u8], gas: u64, @@ -524,9 +520,6 @@ pub trait HostFn { /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] - #[deprecated( - note = "Unstable function. Behaviour can change without further notice. Use only for testing." - )] fn instantiate_v2( code_hash: &[u8], ref_time_limit: u64, @@ -602,10 +595,7 @@ pub trait HostFn { /// /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps /// otherwise. - #[deprecated( - note = "Unstable function. Behaviour can change without further notice. Use only for testing." - )] - fn remove_delegate_dependency(code_hash: &[u8]); + fn unlock_delegate_dependency(code_hash: &[u8]); /// Cease contract execution and save a data buffer as a result of the execution. /// diff --git a/substrate/frame/contracts/uapi/src/host/riscv32.rs b/substrate/frame/contracts/uapi/src/host/riscv32.rs index dc1c48308df81a9d75648aa399a515bd90df2542..561ab28747df9e55d105d6db09f4776d5e453e28 100644 --- a/substrate/frame/contracts/uapi/src/host/riscv32.rs +++ b/substrate/frame/contracts/uapi/src/host/riscv32.rs @@ -130,7 +130,7 @@ impl HostFn for HostFnImpl { flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], @@ -281,11 +281,11 @@ impl HostFn for HostFnImpl { todo!() } - fn add_delegate_dependency(code_hash: &[u8]) { + fn lock_delegate_dependency(code_hash: &[u8]) { todo!() } - fn remove_delegate_dependency(code_hash: &[u8]) { + fn unlock_delegate_dependency(code_hash: &[u8]) { todo!() } diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs index 3df3abeb7dcdc1d9f9ed0ac7c9023dd4853da26c..bc697238061ab16e47d304bd9583f51f7eb99c0b 100644 --- a/substrate/frame/contracts/uapi/src/host/wasm32.rs +++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs @@ -23,7 +23,7 @@ mod sys { extern "C" { pub fn account_reentrance_count(account_ptr: *const u8) -> u32; - pub fn add_delegate_dependency(code_hash_ptr: *const u8); + pub fn lock_delegate_dependency(code_hash_ptr: *const u8); pub fn address(output_ptr: *mut u8, output_len_ptr: *mut u32); @@ -125,7 +125,7 @@ mod sys { pub fn reentrance_count() -> u32; - pub fn remove_delegate_dependency(code_hash_ptr: *const u8); + pub fn unlock_delegate_dependency(code_hash_ptr: *const u8); pub fn seal_return(flags: u32, data_ptr: *const u8, data_len: u32) -> !; @@ -223,7 +223,7 @@ mod sys { pub fn weight_to_fee( ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, output_ptr: *mut u8, output_len_ptr: *mut u32, ); @@ -239,7 +239,7 @@ mod sys { flags: u32, callee_ptr: *const u8, ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit_ptr: *const u8, transferred_value_ptr: *const u8, input_data_ptr: *const u8, @@ -251,7 +251,7 @@ mod sys { pub fn instantiate( code_hash_ptr: *const u8, ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit_ptr: *const u8, value_ptr: *const u8, input_ptr: *const u8, @@ -301,6 +301,7 @@ macro_rules! 
impl_wrapper_for { unsafe { $( $mod )::*::$name(output.as_mut_ptr(), &mut output_len); } + extract_from_slice(output, output_len as usize) } } }; @@ -487,7 +488,7 @@ impl HostFn for HostFnImpl { flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], @@ -501,7 +502,7 @@ impl HostFn for HostFnImpl { flags.bits(), callee.as_ptr(), ref_time_limit, - proof_time_limit, + proof_size_limit, deposit_ptr, value.as_ptr(), input_data.as_ptr(), @@ -803,12 +804,12 @@ impl HostFn for HostFnImpl { unsafe { sys::account_reentrance_count(account.as_ptr()) } } - fn add_delegate_dependency(code_hash: &[u8]) { - unsafe { sys::add_delegate_dependency(code_hash.as_ptr()) } + fn lock_delegate_dependency(code_hash: &[u8]) { + unsafe { sys::lock_delegate_dependency(code_hash.as_ptr()) } } - fn remove_delegate_dependency(code_hash: &[u8]) { - unsafe { sys::remove_delegate_dependency(code_hash.as_ptr()) } + fn unlock_delegate_dependency(code_hash: &[u8]) { + unsafe { sys::unlock_delegate_dependency(code_hash.as_ptr()) } } fn instantiation_nonce() -> u64 { diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index d4921bf7b4be58fd96207bac8d01120a8db49ae3..ff5af995026f39527780d964aea16ec13be1ec2b 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "max-encoded-len", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index e1e8964dd9cf711d7f23a012f7e05266f9a146ef..9a55cda5340c2fa795fd79b17ee4e2b5f568bed2 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index 089556191cd14eee735d12976ba6b0ed471a4eac..08e2a7599f5542a8b87ce80df8d96b5cf4ee9af5 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -211,7 +211,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 4f43f89abed222f963156c2f5afaebb8288bf45a..6bf4dfe4f1eb993e35a99a34bd21b2bde81d4355 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -1343,7 +1343,7 @@ pub mod pallet { #[pallet::getter(fn minimum_untrusted_score)] pub type MinimumUntrustedScore = StorageValue<_, ElectionScore>; - /// The current storage version. + /// The in-code storage version. /// /// v1: https://github.com/paritytech/substrate/pull/12237/ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); diff --git a/substrate/frame/election-provider-multi-phase/src/migrations.rs b/substrate/frame/election-provider-multi-phase/src/migrations.rs index 50b821e6db6ae8c7a2cc68192f9dd8cb39e9f460..156f1c02e27cd23e3e37014650fdff66115cd18f 100644 --- a/substrate/frame/election-provider-multi-phase/src/migrations.rs +++ b/substrate/frame/election-provider-multi-phase/src/migrations.rs @@ -27,12 +27,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index b5d6d1dca54e18947b5b6c86306df16d650a24ac..1bf1165229a7dd20700bb561d4dd776ca7c250bb 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -18,8 +18,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.49", features = ["full", "visit"] } -quote = "1.0.28" +syn = { features = ["full", "visit"], workspace = true } +quote = { workspace = true } proc-macro2 = "1.0.56" proc-macro-crate = "3.0.0" diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index a078361a5f713b5d267fdc1002ac361f95a3faef..308f2cdc1aada6b3e5c6fad257b75b9f0b5393d9 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -188,7 +188,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index eb6355edd312a1ff5f865c235ffd034f085a3027..45c7440eb89135eca98cf00a4aaf5d6ad2c094ca 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -23,6 +23,7 @@ pallet-example-frame-crate = { path = "frame-crate", default-features = false } pallet-example-kitchensink = { path = "kitchensink", default-features = false } pallet-example-offchain-worker = { path = "offchain-worker", default-features = false } pallet-example-split = { path = "split", default-features = false } +pallet-example-single-block-migrations = { path = "single-block-migrations", default-features = false } pallet-example-tasks = { path = "tasks", default-features = false } [features] @@ -34,6 +35,7 @@ std = [ "pallet-example-frame-crate/std", "pallet-example-kitchensink/std", "pallet-example-offchain-worker/std", + "pallet-example-single-block-migrations/std", "pallet-example-split/std", "pallet-example-tasks/std", ] @@ -43,6 +45,7 @@ try-runtime = [ "pallet-example-basic/try-runtime", "pallet-example-kitchensink/try-runtime", "pallet-example-offchain-worker/try-runtime", + "pallet-example-single-block-migrations/try-runtime", "pallet-example-split/try-runtime", "pallet-example-tasks/try-runtime", ] diff --git a/substrate/frame/examples/basic/src/benchmarking.rs b/substrate/frame/examples/basic/src/benchmarking.rs index 4b2ebb41fbda1d6d8a0d12a65b32391510f1a488..65ca3089aba4247cae65932cd4bca9b68d520b75 100644 --- a/substrate/frame/examples/basic/src/benchmarking.rs +++ b/substrate/frame/examples/basic/src/benchmarking.rs @@ -48,7 +48,7 @@ mod benchmarks { set_dummy(RawOrigin::Root, value); // The execution phase is just running `set_dummy` extrinsic call // This is the optional benchmark verification phase, asserting certain states. - assert_eq!(Pallet::::dummy(), Some(value)) + assert_eq!(Dummy::::get(), Some(value)) } // An example method that returns a Result that can be called within a benchmark diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index dad4d01978f95391e88cd99dce82f86f9d942e87..12cadc969fd74288fc2ca99ac014fa98a603d56d 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -286,9 +286,7 @@ pub mod pallet { let _sender = ensure_signed(origin)?; // Read the value of dummy from storage. - // let dummy = Self::dummy(); - // Will also work using the `::get` on the storage item type itself: - // let dummy = >::get(); + // let dummy = Dummy::::get(); // Calculate the new value. // let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); @@ -381,20 +379,14 @@ pub mod pallet { // - `Foo::put(1); Foo::get()` returns `1`; // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). #[pallet::storage] - // The getter attribute generate a function on `Pallet` placeholder: - // `fn getter_name() -> Type` for basic value items or - // `fn getter_name(key: KeyType) -> ValueType` for map items. - #[pallet::getter(fn dummy)] pub(super) type Dummy = StorageValue<_, T::Balance>; // A map that has enumerable entries. #[pallet::storage] - #[pallet::getter(fn bar)] pub(super) type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance>; // this one uses the query kind: `ValueQuery`, we'll demonstrate the usage of 'mutate' API. 
#[pallet::storage] - #[pallet::getter(fn foo)] pub(super) type Foo = StorageValue<_, T::Balance, ValueQuery>; #[pallet::storage] @@ -433,10 +425,10 @@ impl Pallet { fn accumulate_foo(origin: T::RuntimeOrigin, increase_by: T::Balance) -> DispatchResult { let _sender = ensure_signed(origin)?; - let prev = >::get(); + let prev = Foo::::get(); // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an // Option<> type. - let result = >::mutate(|foo| { + let result = Foo::::mutate(|foo| { *foo = foo.saturating_add(increase_by); *foo }); diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index 9434ace35ffe35467a9c513d67f0450e5b6ade6e..207e46e428ddc62359621bd11eb226e54ab3f92e 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -119,25 +119,25 @@ fn it_works_for_optional_value() { // Check that GenesisBuilder works properly. let val1 = 42; let val2 = 27; - assert_eq!(Example::dummy(), Some(val1)); + assert_eq!(Dummy::::get(), Some(val1)); // Check that accumulate works when we have Some value in Dummy already. assert_ok!(Example::accumulate_dummy(RuntimeOrigin::signed(1), val2)); - assert_eq!(Example::dummy(), Some(val1 + val2)); + assert_eq!(Dummy::::get(), Some(val1 + val2)); // Check that accumulate works when we Dummy has None in it. >::on_initialize(2); assert_ok!(Example::accumulate_dummy(RuntimeOrigin::signed(1), val1)); - assert_eq!(Example::dummy(), Some(val1 + val2 + val1)); + assert_eq!(Dummy::::get(), Some(val1 + val2 + val1)); }); } #[test] fn it_works_for_default_value() { new_test_ext().execute_with(|| { - assert_eq!(Example::foo(), 24); + assert_eq!(Foo::::get(), 24); assert_ok!(Example::accumulate_foo(RuntimeOrigin::signed(1), 1)); - assert_eq!(Example::foo(), 25); + assert_eq!(Foo::::get(), 25); }); } @@ -146,7 +146,7 @@ fn set_dummy_works() { new_test_ext().execute_with(|| { let test_val = 133; assert_ok!(Example::set_dummy(RuntimeOrigin::root(), test_val.into())); - assert_eq!(Example::dummy(), Some(test_val)); + assert_eq!(Dummy::::get(), Some(test_val)); }); } diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index cd1653e6c764358165c8306d4810c1d4c9bea3b4..224faf9dfd431a0ce9281c406738b7aa2fddb0f6 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -108,7 +108,7 @@ pub mod pallet { } /// A type providing default configurations for this pallet in another environment. Examples - /// could be a parachain, or a solo-chain. + /// could be a parachain, or a solochain. /// /// Appropriate derive for `frame_system::DefaultConfig` needs to be provided. In this /// example, we simple derive `frame_system::config_preludes::TestDefaultConfig` again. @@ -149,7 +149,7 @@ pub mod tests { } ); - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { // these items are defined by frame-system as `no_default`, so we must specify them here. 
type Block = Block; diff --git a/substrate/frame/examples/kitchensink/src/benchmarking.rs b/substrate/frame/examples/kitchensink/src/benchmarking.rs index 24da581fc967b8e8c8586bf3b41a4452a89a4dcd..5f1d378e06fe67ee3978124ce7b557ad39b89e0a 100644 --- a/substrate/frame/examples/kitchensink/src/benchmarking.rs +++ b/substrate/frame/examples/kitchensink/src/benchmarking.rs @@ -51,7 +51,7 @@ mod benchmarks { set_foo(RawOrigin::Root, value, 10u128); // The execution phase is just running `set_foo` extrinsic call // This is the optional benchmark verification phase, asserting certain states. - assert_eq!(Pallet::::foo(), Some(value)) + assert_eq!(Foo::::get(), Some(value)) } // This line generates test cases for benchmarking, and could be run by: diff --git a/substrate/frame/examples/kitchensink/src/lib.rs b/substrate/frame/examples/kitchensink/src/lib.rs index 18429bc967d7c1e1f7b181e708ca3a86ef6f251c..b7425b0c0846afc3e9488a490144a89e7b647301 100644 --- a/substrate/frame/examples/kitchensink/src/lib.rs +++ b/substrate/frame/examples/kitchensink/src/lib.rs @@ -125,7 +125,6 @@ pub mod pallet { #[pallet::storage] #[pallet::unbounded] // optional #[pallet::storage_prefix = "OtherFoo"] // optional - #[pallet::getter(fn foo)] // optional pub type Foo = StorageValue; #[pallet::type_value] diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index 6c1fa6ea8ec42dbee21875a0610b3724aadff802..d21c8b4cfd24971f945b73c558463ee7a1f47f77 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -332,7 +332,6 @@ pub mod pallet { /// /// This is used to calculate average price, should have bounded size. #[pallet::storage] - #[pallet::getter(fn prices)] pub(super) type Prices = StorageValue<_, BoundedVec, ValueQuery>; /// Defines the block when next unsigned transaction will be accepted. @@ -341,7 +340,6 @@ pub mod pallet { /// we only allow one transaction every `T::UnsignedInterval` blocks. /// This storage entry defines when new transaction is going to be accepted. #[pallet::storage] - #[pallet::getter(fn next_unsigned_at)] pub(super) type NextUnsignedAt = StorageValue<_, BlockNumberFor, ValueQuery>; } @@ -479,7 +477,7 @@ impl Pallet { ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. - let next_unsigned_at = >::get(); + let next_unsigned_at = NextUnsignedAt::::get(); if next_unsigned_at > block_number { return Err("Too early to send unsigned transaction") } @@ -513,7 +511,7 @@ impl Pallet { ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. - let next_unsigned_at = >::get(); + let next_unsigned_at = NextUnsignedAt::::get(); if next_unsigned_at > block_number { return Err("Too early to send unsigned transaction") } @@ -543,7 +541,7 @@ impl Pallet { ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. - let next_unsigned_at = >::get(); + let next_unsigned_at = NextUnsignedAt::::get(); if next_unsigned_at > block_number { return Err("Too early to send unsigned transaction") } @@ -664,7 +662,7 @@ impl Pallet { /// Calculate current average price. 
fn average_price() -> Option { - let prices = >::get(); + let prices = Prices::::get(); if prices.is_empty() { None } else { @@ -677,7 +675,7 @@ impl Pallet { new_price: &u32, ) -> TransactionValidity { // Now let's check if the transaction has any chance to succeed. - let next_unsigned_at = >::get(); + let next_unsigned_at = NextUnsignedAt::::get(); if &next_unsigned_at > block_number { return InvalidTransaction::Stale.into() } diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..5059b2433c77139568d99326473cded73a5af9e9 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "pallet-example-single-block-migrations" +version = "0.0.1" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME example pallet demonstrating best-practices for writing storage migrations." +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +docify = { version = "0.2.3", default-features = false } +log = { version = "0.4.20", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-support = { path = "../../support", default-features = false } +frame-executive = { path = "../../executive", default-features = false } +frame-system = { path = "../../system", default-features = false } +frame-try-runtime = { path = "../../try-runtime", default-features = false, optional = true } +pallet-balances = { path = "../../balances", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-version = { path = "../../../primitives/version", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-executive/std", + "frame-support/std", + "frame-system/std", + "frame-try-runtime/std", + "log/std", + "pallet-balances/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-version/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..86a9e5d6e95bbeaa0230b81153c3f4149f332436 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/lib.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Single Block Migration Example Pallet +//! +//! An example pallet demonstrating best-practices for writing single-block migrations in the +//! context of upgrading pallet storage. +//! +//! ## Forewarning +//! +//! Single block migrations **MUST** execute in a single block; therefore, when executed on a +//! parachain, they are only appropriate when guaranteed to not exceed block weight limits. If a +//! parachain submits a block that exceeds the block weight limit it will **brick the chain**! +//! +//! If weight is a concern or you are not sure which type of migration to use, you should probably +//! use a multi-block migration. +//! +//! TODO: Link above to multi-block migration example. +//! +//! ## Pallet Overview +//! +//! This example pallet contains a single storage item [`Value`](pallet::Value), which may be set by +//! any signed origin by calling the [`set_value`](crate::Call::set_value) extrinsic. +//! +//! For the purposes of this exercise, we imagine that in [`StorageVersion`] V0 of this pallet +//! [`Value`](pallet::Value) is a `u32`, and this is what is currently stored on-chain. +//! +//! ```ignore +//! // (Old) Storage Version V0 representation of `Value` +//! #[pallet::storage] +//! pub type Value = StorageValue<_, u32>; +//! ``` +//! +//! In [`StorageVersion`] V1 of the pallet a new struct [`CurrentAndPreviousValue`] is introduced: +#![doc = docify::embed!("src/lib.rs", CurrentAndPreviousValue)] +//! and [`Value`](pallet::Value) is updated to store this new struct instead of a `u32`: +#![doc = docify::embed!("src/lib.rs", Value)] +//! +//! In StorageVersion V1 of the pallet when [`set_value`](crate::Call::set_value) is called, the +//! new value is stored in the `current` field of [`CurrentAndPreviousValue`], and the previous +//! value (if it exists) is stored in the `previous` field. +#![doc = docify::embed!("src/lib.rs", pallet_calls)] +//! +//! ## Why a migration is necessary +//! +//! Without a migration, there will be a discrepancy between the on-chain storage for [`Value`] (in +//! V0 it is a `u32`) and the current storage for [`Value`] (in V1 it was changed to a +//! [`CurrentAndPreviousValue`] struct). +//! +//! The on-chain storage for [`Value`] would be a `u32` but the runtime would try to read it as a +//! [`CurrentAndPreviousValue`]. This would result in unacceptable undefined behavior. +//! +//! ## Adding a migration module +//! +//! Writing a pallet's migrations in a separate module is strongly recommended. +//! +//! Here's how the migration module is defined for this pallet: +//! +//! ```text +//! substrate/frame/examples/single-block-migrations/src/ +//! ├── lib.rs <-- pallet definition +//! ├── Cargo.toml <-- pallet manifest +//! └── migrations/ +//! ├── mod.rs <-- migrations module definition +//! └── v1.rs <-- migration logic for the V0 to V1 transition +//! ``` +//! +//! This structure allows keeping migration logic separate from the pallet logic and +//! easily adding new migrations in the future.
+//! +//! ## Writing the Migration +//! +//! All code related to the migration can be found under +//! [`v1.rs`](migrations::v1). +//! +//! See the migration source code for detailed comments. +//! +//! To keep the migration logic organised, it is split across additional modules: +//! +//! ### `mod v0` +//! +//! Here we define a [`storage_alias`](frame_support::storage_alias) for the old v0 [`Value`] +//! format. +//! +//! This allows reading the old v0 value from storage during the migration. +//! +//! ### `mod version_unchecked` +//! +//! Here we define our raw migration logic, +//! `version_unchecked::MigrateV0ToV1`, which implements the [`OnRuntimeUpgrade`] trait. +//! +//! Importantly, it is kept in a private module so that it cannot be accidentally used in a runtime. +//! +//! Private modules cannot be referenced in docs, so please read the code directly. +//! +//! #### Standalone Struct or Pallet Hook? +//! +//! Note that the storage migration logic is attached to a standalone struct implementing +//! [`OnRuntimeUpgrade`], rather than implementing the +//! [`Hooks::on_runtime_upgrade`](frame_support::traits::Hooks::on_runtime_upgrade) hook directly on +//! the pallet. The pallet hook is better suited for special types of logic that need to execute on +//! every runtime upgrade, but not so much for one-off storage migrations. +//! +//! ### `pub mod versioned` +//! +//! Here, `version_unchecked::MigrateV0ToV1` is wrapped in a +//! [`VersionedMigration`] to define +//! [`versioned::MigrateV0ToV1`](crate::migrations::v1::versioned::MigrateV0ToV1), which may be used +//! in runtimes. +//! +//! Using [`VersionedMigration`] ensures that +//! - The migration only runs once when the on-chain storage version is `0` +//! - The on-chain storage version is updated to `1` after the migration executes +//! - Reads and writes from checking and setting the on-chain storage version are accounted for in +//! the final [`Weight`](frame_support::weights::Weight) +//! +//! This is the only public module exported from `v1`. +//! +//! ### `mod test` +//! +//! Here basic unit tests are defined for the migration. +//! +//! When writing migration tests, don't forget to check: +//! - `on_runtime_upgrade` returns the expected weight +//! - `post_upgrade` succeeds when given the bytes returned by `pre_upgrade` +//! - Pallet storage is in the expected state after the migration +//! +//! [`VersionedMigration`]: frame_support::migrations::VersionedMigration +//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion +//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade +//! [`MigrateV0ToV1`]: crate::migrations::v1::versioned::MigrateV0ToV1 + +// We make sure this pallet uses `no_std` for compiling to Wasm. +#![cfg_attr(not(feature = "std"), no_std)] +// allow non-camel-case names for storage version V0 value +#![allow(non_camel_case_types)] + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +// Export migrations so they may be used in the runtime. +pub mod migrations; +#[doc(hidden)] +mod mock; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::StorageVersion; +use sp_runtime::RuntimeDebug; + +/// Example struct holding the most recently set [`u32`] and the +/// second most recently set [`u32`] (if one existed).
+#[docify::export] +#[derive( + Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen, +)] +pub struct CurrentAndPreviousValue { + /// The most recently set value. + pub current: u32, + /// The previous value, if one existed. + pub previous: Option<u32>, +} + +// Pallet for demonstrating storage migrations. +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// Define the current [`StorageVersion`] of the pallet. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet<T>(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + /// [`StorageVersion`] V1 of [`Value`]. + /// + /// Currently used. + #[docify::export] + #[pallet::storage] + pub type Value<T> = StorageValue<_, CurrentAndPreviousValue>; + + #[docify::export(pallet_calls)] + #[pallet::call] + impl<T: Config> Pallet<T> { + pub fn set_value(origin: OriginFor<T>, value: u32) -> DispatchResult { + ensure_signed(origin)?; + + let previous = Value::<T>::get().map(|v| v.current); + let new_struct = CurrentAndPreviousValue { current: value, previous }; + <Value<T>>::put(new_struct); + + Ok(()) + } + } +} diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs b/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..80a33f69941aa1a49563971ab0c9e13a88ecda19 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs @@ -0,0 +1,20 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Module containing all logic associated with the example migration from +/// [`StorageVersion`](frame_support::traits::StorageVersion) V0 to V1. +pub mod v1; diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs new file mode 100644 index 0000000000000000000000000000000000000000..b46640a320207551ab55b1dc793574ce68c4c26c --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -0,0 +1,222 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{ + storage_alias, + traits::{Get, OnRuntimeUpgrade}, +}; + +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +/// Collection of storage item formats from the previous storage version. +/// +/// Required so we can read values in the v0 storage format during the migration. +mod v0 { + use super::*; + + /// V0 type for [`crate::Value`]. + #[storage_alias] + pub type Value<T: crate::Config> = StorageValue<crate::Pallet<T>, u32>; +} + +/// Private module containing *version unchecked* migration logic. +/// +/// Should only be used by the [`VersionedMigration`](frame_support::migrations::VersionedMigration) +/// type in this module to create something to export. +/// +/// The unversioned migration should be kept private so that it cannot +/// accidentally be used in any runtimes. +/// +/// For more about this pattern of keeping items private, see +/// - +/// - +mod version_unchecked { + use super::*; + + /// Implements [`OnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1. + /// + /// In V0 of the pallet [`crate::Value`] is just a `u32`. In V1, it has been upgraded to + /// contain the struct [`crate::CurrentAndPreviousValue`]. + /// + /// In this migration, update the on-chain storage for the pallet to reflect the new storage + /// layout. + pub struct MigrateV0ToV1<T>(sp_std::marker::PhantomData<T>); + + impl<T: crate::Config> OnRuntimeUpgrade for MigrateV0ToV1<T> { + /// Return the existing [`crate::Value`] so we can check that it was correctly set in + /// `version_unchecked::MigrateV0ToV1::post_upgrade`. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> { + use codec::Encode; + + // Access the old value using the `storage_alias` type + let old_value = v0::Value::<T>::get(); + // Return it as an encoded `Vec<u8>` + Ok(old_value.encode()) + } + + /// Migrate the storage from V0 to V1. + /// + /// - If the value doesn't exist, there is nothing to do. + /// - If the value exists, it is read and then written back to storage inside a + /// [`crate::CurrentAndPreviousValue`]. + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Read the old value from storage + if let Some(old_value) = v0::Value::<T>::take() { + // Write the new value to storage + let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; + crate::Value::<T>::put(new); + // One read for the old value, one write for the new value + T::DbWeight::get().reads_writes(1, 1) + } else { + // One read for trying to access the old value + T::DbWeight::get().reads(1) + } + } + + /// Verifies the storage was migrated correctly. + /// + /// - If there was no old value, the new value should not be set. + /// - If there was an old value, the new value should be a + /// [`crate::CurrentAndPreviousValue`].
+ #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + use frame_support::ensure; + + let maybe_old_value = Option::<u32>::decode(&mut &state[..]).map_err(|_| { + sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage") + })?; + + match maybe_old_value { + Some(old_value) => { + let expected_new_value = + crate::CurrentAndPreviousValue { current: old_value, previous: None }; + let actual_new_value = crate::Value::<T>::get(); + + ensure!(actual_new_value.is_some(), "New value not set"); + ensure!( + actual_new_value == Some(expected_new_value), + "New value not set correctly" + ); + }, + None => { + ensure!(crate::Value::<T>::get().is_none(), "New value unexpectedly set"); + }, + }; + Ok(()) + } + } +} + +/// Public module containing *version checked* migration logic. +/// +/// This is the only module that should be exported from this file. +/// +/// See [`VersionedMigration`](frame_support::migrations::VersionedMigration) docs for more about +/// how it works. +pub mod versioned { + use super::*; + + /// `version_unchecked::MigrateV0ToV1` wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: + /// - The migration only runs once when the on-chain storage version is 0 + /// - The on-chain storage version is updated to `1` after the migration executes + /// - Reads/Writes from checking/setting the on-chain storage version are accounted for + pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration< + 0, // The migration will only execute when the on-chain storage version is 0 + 1, // The on-chain storage version will be set to 1 after the migration is complete + version_unchecked::MigrateV0ToV1<T>, + crate::pallet::Pallet<T>, + <T as frame_system::Config>::DbWeight, + >; +} + +/// Tests for our migration. +/// +/// When writing migration tests, it is important to check: +/// 1. `on_runtime_upgrade` returns the expected weight +/// 2. `post_upgrade` succeeds when given the bytes returned by `pre_upgrade` +/// 3. The storage is in the expected state after the migration +#[cfg(any(all(feature = "try-runtime", test), doc))] +mod test { + use super::*; + use crate::mock::{new_test_ext, MockRuntime}; + use frame_support::assert_ok; + use version_unchecked::MigrateV0ToV1; + + #[test] + fn handles_no_existing_value() { + new_test_ext().execute_with(|| { + // By default, no value should be set. Verify this assumption. + assert!(crate::Value::<MockRuntime>::get().is_none()); + assert!(v0::Value::<MockRuntime>::get().is_none()); + + // Get the pre_upgrade bytes + let bytes = match MigrateV0ToV1::<MockRuntime>::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + + // Execute the migration + let weight = MigrateV0ToV1::<MockRuntime>::on_runtime_upgrade(); + + // Verify post_upgrade succeeds + assert_ok!(MigrateV0ToV1::<MockRuntime>::post_upgrade(bytes)); + + // The weight should be just 1 read for trying to access the old value. + assert_eq!(weight, <MockRuntime as frame_system::Config>::DbWeight::get().reads(1)); + + // After the migration, no value should have been set.
+ assert!(crate::Value::<MockRuntime>::get().is_none()); + }) + } + + #[test] + fn handles_existing_value() { + new_test_ext().execute_with(|| { + // Set up an initial value + let initial_value = 42; + v0::Value::<MockRuntime>::put(initial_value); + + // Get the pre_upgrade bytes + let bytes = match MigrateV0ToV1::<MockRuntime>::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + + // Execute the migration + let weight = MigrateV0ToV1::<MockRuntime>::on_runtime_upgrade(); + + // Verify post_upgrade succeeds + assert_ok!(MigrateV0ToV1::<MockRuntime>::post_upgrade(bytes)); + + // The weight used should be 1 read for the old value, and 1 write for the new + // value. + assert_eq!( + weight, + <MockRuntime as frame_system::Config>::DbWeight::get().reads_writes(1, 1) + ); + + // After the migration, the new value should be set as the `current` value. + let expected_new_value = + crate::CurrentAndPreviousValue { current: initial_value, previous: None }; + assert_eq!(crate::Value::<MockRuntime>::get(), Some(expected_new_value)); + }) + } +} diff --git a/substrate/frame/examples/single-block-migrations/src/mock.rs b/substrate/frame/examples/single-block-migrations/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..02f72e01836f2462de2043cae737ea46ec2072ed --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/mock.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(any(all(feature = "try-runtime", test), doc))] + +use crate::*; +use frame_support::{derive_impl, traits::ConstU64, weights::constants::ParityDbWeight}; + +// Re-export crate as its pallet name for construct_runtime. +use crate as pallet_example_storage_migration; + +type Block = frame_system::mocking::MockBlock<MockRuntime>; + +// For testing the pallet, we construct a mock runtime.
+frame_support::construct_runtime!( + pub struct MockRuntime { + System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>}, + Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, + Example: pallet_example_storage_migration::{Pallet, Call, Storage}, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for MockRuntime { + type Block = Block; + type AccountData = pallet_balances::AccountData<u64>; + type DbWeight = ParityDbWeight; +} + +impl pallet_balances::Config for MockRuntime { + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); +} + +impl Config for MockRuntime {} + +pub fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + + let t = RuntimeGenesisConfig { system: Default::default(), balances: Default::default() } + .build_storage() + .unwrap(); + t.into() +} diff --git a/substrate/frame/examples/split/src/lib.rs b/substrate/frame/examples/split/src/lib.rs index 74d2e0cc24b7bf5789da19f2657c5e7d2514bbfe..5245d90e390cff1ccab79ecc279fe57188cde030 100644 --- a/substrate/frame/examples/split/src/lib.rs +++ b/substrate/frame/examples/split/src/lib.rs @@ -107,7 +107,7 @@ pub mod pallet { let _who = ensure_signed(origin)?; // Read a value from storage. - match <Something<T>>::get() { + match Something::<T>::get() { // Return an error if the value has not been set. None => return Err(Error::<T>::NoneValue.into()), Some(old) => { diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index f38bbe52dc114900e4b497cb17da6deb4406512c..dee23a41379fc8b732252b9fe4db39bf809699fd 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -43,6 +43,9 @@ //! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be //! built using only the `frame` umbrella crate. //! +//! - [`pallet_example_single_block_migrations`]: An example pallet demonstrating best-practices for +//! writing storage migrations. +//! +//! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work. +//! +//! **Tip**: Use `cargo doc --package <pallet-name> --open` to view each pallet's documentation.
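The `versioned::MigrateV0ToV1` alias defined above is what a runtime actually consumes: it goes into the migration tuple that `Executive` runs inside `on_runtime_upgrade`. A minimal sketch of that wiring, assuming the usual `Runtime`, `Block`, and `AllPalletsWithSystem` items produced by `construct_runtime!` (these names are illustrative and not part of this diff):

```rust
// Illustrative runtime wiring only; `Runtime`, `Block`, and
// `AllPalletsWithSystem` are assumed to exist in the runtime crate.

/// Single-block migrations to run at the next runtime upgrade, in order.
pub type Migrations = (
	// The version-checked wrapper from the example pallet: a no-op unless the
	// pallet's on-chain storage version is exactly 0.
	pallet_example_single_block_migrations::migrations::v1::versioned::MigrateV0ToV1<Runtime>,
);

/// `Executive` runs `Migrations` in `on_runtime_upgrade`, during
/// initialization of the first block built with the upgraded runtime code.
pub type Executive = frame_executive::Executive<
	Runtime,
	Block,
	frame_system::ChainContext<Runtime>,
	Runtime,
	AllPalletsWithSystem,
	Migrations,
>;
```

Because the wrapper is version-checked, it can safely remain in the tuple across releases: once the on-chain storage version is no longer `0`, it does nothing beyond the storage-version read already accounted for in its weight.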
diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index a4ca265f6178218791bf83f6b8f259821e086a98..63285e4cb4939c10db3342b5d59eeffc63103d48 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -16,6 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +aquamarine = "0.3.2" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } @@ -44,6 +45,7 @@ default = ["std"] with-tracing = ["sp-tracing/with-tracing"] std = [ "codec/std", + "frame-support/experimental", "frame-support/std", "frame-system/std", "frame-try-runtime/std", diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 48ff675f8082ddd0e7779c946b32dde66b171078..3028eaf318e0881c1b6f4d4ea6ac17adfe99a75f 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +#![cfg_attr(not(feature = "std"), no_std)] + //! # Executive Module //! //! The Executive module acts as the orchestration layer for the runtime. It dispatches incoming @@ -35,6 +37,8 @@ //! - Finalize a block. //! - Start an off-chain worker. //! +//! The flow of their application in a block is explained in the [block flowchart](block_flowchart). +//! //! ### Implementations //! //! The Executive module provides the following implementations: @@ -114,17 +118,51 @@ //! pub type Executive = executive::Executive; //! ``` -#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(doc)] +#[cfg_attr(doc, aquamarine::aquamarine)] +/// # Block Execution +/// +/// These are the steps of block execution as done by [`Executive::execute_block`]. A block is +/// invalid if any of them fail. 
+/// +/// ```mermaid +/// flowchart TD +/// Executive::execute_block --> on_runtime_upgrade +/// on_runtime_upgrade --> System::initialize +/// Executive::initialize_block --> System::initialize +/// System::initialize --> on_initialize +/// on_initialize --> PreInherents[System::PreInherents] +/// PreInherents --> Inherents[Apply Inherents] +/// Inherents --> PostInherents[System::PostInherents] +/// PostInherents --> Check{MBM ongoing?} +/// Check -->|No| poll +/// Check -->|Yes| post_transactions_2[System::PostTransaction] +/// post_transactions_2 --> Step[MBMs::step] +/// Step --> on_finalize +/// poll --> transactions[Apply Transactions] +/// transactions --> post_transactions_1[System::PostTransaction] +/// post_transactions_1 --> CheckIdle{Weight remaining?} +/// CheckIdle -->|Yes| on_idle +/// CheckIdle -->|No| on_finalize +/// on_idle --> on_finalize +/// ``` +pub mod block_flowchart {} + +#[cfg(test)] +mod tests; use codec::{Codec, Encode}; use frame_support::{ + defensive_assert, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, + migrations::MultiStepMigrator, pallet_prelude::InvalidTransaction, traits::{ BeforeAllRuntimeMigrations, EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, - OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, + OnFinalize, OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, PostInherents, + PostTransactions, PreInherents, }, - weights::Weight, + weights::{Weight, WeightMeter}, }; use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{ @@ -134,7 +172,7 @@ use sp_runtime::{ ValidateUnsigned, Zero, }, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, + ApplyExtrinsicResult, ExtrinsicInclusionMode, }; use sp_std::{marker::PhantomData, prelude::*}; @@ -198,7 +236,8 @@ impl< + OnInitialize> + OnIdle> + OnFinalize> - + OffchainWorker>, + + OffchainWorker> + + OnPoll>, COnRuntimeUpgrade: OnRuntimeUpgrade, > ExecuteBlock for Executive @@ -237,6 +276,7 @@ impl< + OnIdle> + OnFinalize> + OffchainWorker> + + OnPoll> + TryState> + TryDecodeEntireStorage, COnRuntimeUpgrade: OnRuntimeUpgrade, @@ -272,36 +312,50 @@ where select, ); - Self::initialize_block(block.header()); - Self::initial_checks(&block); - + let mode = Self::initialize_block(block.header()); + let num_inherents = Self::initial_checks(&block) as usize; let (header, extrinsics) = block.deconstruct(); + // Check if there are any forbidden non-inherents in the block. + if mode == ExtrinsicInclusionMode::OnlyInherents && extrinsics.len() > num_inherents { + return Err("Only inherents allowed".into()) + } + let try_apply_extrinsic = |uxt: Block::Extrinsic| -> ApplyExtrinsicResult { sp_io::init_tracing(); let encoded = uxt.encode(); let encoded_len = encoded.len(); + let is_inherent = System::is_inherent(&uxt); // skip signature verification. 
let xt = if signature_check {
uxt.check(&Default::default())
} else {
uxt.unchecked_into_checked_i_know_what_i_am_doing(&Default::default())
}?;
- >::note_extrinsic(encoded);
let dispatch_info = xt.get_dispatch_info();
+ if !is_inherent && !>::inherents_applied() {
+ Self::inherents_applied();
+ }
+
+ >::note_extrinsic(encoded);
let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?;
+ if r.is_err() && dispatch_info.class == DispatchClass::Mandatory {
+ return Err(InvalidTransaction::BadMandatory.into())
+ }
+
>::note_applied_extrinsic(&r, dispatch_info);
Ok(r.map(|_| ()).map_err(|e| e.error))
};
- for e in extrinsics {
+ // Apply extrinsics:
+ for e in extrinsics.iter() {
if let Err(err) = try_apply_extrinsic(e.clone()) {
log::error!(
- target: LOG_TARGET, "executing transaction {:?} failed due to {:?}. Aborting the rest of the block execution.",
+ target: LOG_TARGET, "transaction {:?} failed due to {:?}. Aborting the rest of the block execution.",
e, err,
);
@@ -309,9 +363,17 @@
}
}
+ // In this case there were no transactions to trigger this state transition:
+ if !>::inherents_applied() {
+ Self::inherents_applied();
+ }
+
// post-extrinsics book-keeping
>::note_finished_extrinsics();
- Self::idle_and_finalize_hook(*header.number());
+ ::PostTransactions::post_transactions();
+
+ Self::on_idle_hook(*header.number());
+ Self::on_finalize_hook(*header.number());
// run the try-state checks of all pallets, ensuring they don't alter any state.
let _guard = frame_support::StorageNoopGuard::default();
@@ -449,7 +511,8 @@
impl<
+ OnInitialize>
+ OnIdle>
+ OnFinalize>
- + OffchainWorker>,
+ + OffchainWorker>
+ + OnPoll>,
COnRuntimeUpgrade: OnRuntimeUpgrade,
> Executive
where
@@ -464,16 +527,36 @@
pub fn execute_on_runtime_upgrade() -> Weight {
let before_all_weight = ::before_all_runtime_migrations();
- <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade()
- .saturating_add(before_all_weight)
+
+ let runtime_upgrade_weight = <(
+ COnRuntimeUpgrade,
+ ::SingleBlockMigrations,
+ // We want to run the migrations before we call into the pallets as they may
+ // access any state that would then not be migrated.
+ AllPalletsWithSystem,
+ ) as OnRuntimeUpgrade>::on_runtime_upgrade();
+
+ before_all_weight.saturating_add(runtime_upgrade_weight)
}

/// Start the execution of a particular block.
- pub fn initialize_block(header: &frame_system::pallet_prelude::HeaderFor) {
+ pub fn initialize_block(
+ header: &frame_system::pallet_prelude::HeaderFor,
+ ) -> ExtrinsicInclusionMode {
sp_io::init_tracing();
sp_tracing::enter_span!(sp_tracing::Level::TRACE, "init_block");
let digests = Self::extract_pre_digest(header);
Self::initialize_block_impl(header.number(), header.parent_hash(), &digests);
+
+ Self::extrinsic_mode()
+ }
+
+ fn extrinsic_mode() -> ExtrinsicInclusionMode {
+ if ::MultiBlockMigrator::ongoing() {
+ ExtrinsicInclusionMode::OnlyInherents
+ } else {
+ ExtrinsicInclusionMode::AllExtrinsics
+ }
}

fn extract_pre_digest(header: &frame_system::pallet_prelude::HeaderFor) -> Digest {
@@ -519,6 +602,7 @@
);

frame_system::Pallet::::note_finished_initialize();
+ ::PreInherents::pre_inherents();
}

/// Returns if the runtime has been upgraded, based on [`frame_system::LastRuntimeUpgrade`].
@@ -529,7 +613,8 @@
last.map(|v| v.was_upgraded(&current)).unwrap_or(true)
}

- fn initial_checks(block: &Block) {
+ /// Returns the number of inherents in the block.
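One invariant worth spelling out for `try_apply_extrinsic` above: the `inherents_applied` transition (which runs `PostInherents` and then either an MBM step or `on_poll`, see `Executive::inherents_applied` further down) must fire exactly once per block, triggered by the first non-inherent extrinsic. A toy model of just that switch; `SystemFlag` is a hypothetical stand-in for the `frame_system` storage flag:

```rust
/// Hypothetical stand-in for the `frame_system` "inherents applied" flag.
#[derive(Default)]
struct SystemFlag {
    inherents_applied: bool,
}

impl SystemFlag {
    /// Stand-in for `Executive::inherents_applied`, which in the real code
    /// also runs `PostInherents` and either an MBM step or `on_poll`.
    fn note_inherents_applied(&mut self) {
        assert!(!self.inherents_applied, "transition must fire exactly once");
        self.inherents_applied = true;
    }

    fn apply(&mut self, is_inherent: bool) {
        // The first non-inherent flips the switch before it is dispatched.
        if !is_inherent && !self.inherents_applied {
            self.note_inherents_applied();
        }
        // ... note_extrinsic, dispatch, note_applied_extrinsic ...
    }
}

fn main() {
    let mut sys = SystemFlag::default();
    sys.apply(true); // inherent: no transition yet
    assert!(!sys.inherents_applied);
    sys.apply(false); // first transaction: transition fires
    sys.apply(false); // later transactions: no second transition
    assert!(sys.inherents_applied);
}
```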
+ fn initial_checks(block: &Block) -> u32 { sp_tracing::enter_span!(sp_tracing::Level::TRACE, "initial_checks"); let header = block.header(); @@ -542,8 +627,9 @@ where "Parent hash should be valid.", ); - if let Err(i) = System::ensure_inherents_are_first(block) { - panic!("Invalid inherent position for extrinsic at index {}", i); + match System::ensure_inherents_are_first(block) { + Ok(num) => num, + Err(i) => panic!("Invalid inherent position for extrinsic at index {}", i), } } @@ -552,53 +638,90 @@ where sp_io::init_tracing(); sp_tracing::within_span! { sp_tracing::info_span!("execute_block", ?block); + // Execute `on_runtime_upgrade` and `on_initialize`. + let mode = Self::initialize_block(block.header()); + let num_inherents = Self::initial_checks(&block) as usize; + let (header, extrinsics) = block.deconstruct(); + let num_extrinsics = extrinsics.len(); - Self::initialize_block(block.header()); + if mode == ExtrinsicInclusionMode::OnlyInherents && num_extrinsics > num_inherents { + // Invalid block + panic!("Only inherents are allowed in this block") + } - // any initial checks - Self::initial_checks(&block); + Self::apply_extrinsics(extrinsics.into_iter()); - // execute extrinsics - let (header, extrinsics) = block.deconstruct(); - Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); + // In this case there were no transactions to trigger this state transition: + if !>::inherents_applied() { + defensive_assert!(num_inherents == num_extrinsics); + Self::inherents_applied(); + } - // any final checks + >::note_finished_extrinsics(); + ::PostTransactions::post_transactions(); + + Self::on_idle_hook(*header.number()); + Self::on_finalize_hook(*header.number()); Self::final_checks(&header); } } - /// Execute given extrinsics and take care of post-extrinsics book-keeping. - fn execute_extrinsics_with_book_keeping( - extrinsics: Vec, - block_number: NumberFor, - ) { + /// Logic that runs directly after inherent application. + /// + /// It advances the Multi-Block-Migrations or runs the `on_poll` hook. + pub fn inherents_applied() { + >::note_inherents_applied(); + ::PostInherents::post_inherents(); + + if ::MultiBlockMigrator::ongoing() { + let used_weight = ::MultiBlockMigrator::step(); + >::register_extra_weight_unchecked( + used_weight, + DispatchClass::Mandatory, + ); + } else { + let block_number = >::block_number(); + Self::on_poll_hook(block_number); + } + } + + /// Execute given extrinsics. + fn apply_extrinsics(extrinsics: impl Iterator) { extrinsics.into_iter().for_each(|e| { if let Err(e) = Self::apply_extrinsic(e) { let err: &'static str = e.into(); panic!("{}", err) } }); - - // post-extrinsics book-keeping - >::note_finished_extrinsics(); - - Self::idle_and_finalize_hook(block_number); } /// Finalize the block - it is up the caller to ensure that all header fields are valid /// except state-root. + // Note: Only used by the block builder - not Executive itself. 
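`initial_checks` now surfaces the inherent count that `System::ensure_inherents_are_first` computes, and `execute_block` compares it against the total extrinsic count when only inherents are allowed. A self-contained sketch of that prefix check; the real implementation lives in `frame-system`, only the Ok-count / Err-index contract is modelled here:

```rust
/// Inherents must form a prefix of the block: return how many there are,
/// or the index of the first inherent found after a non-inherent.
fn ensure_inherents_are_first(is_inherent: &[bool]) -> Result<u32, u32> {
    let mut num_inherents = 0u32;
    let mut seen_non_inherent = false;
    for (i, &inherent) in is_inherent.iter().enumerate() {
        match (inherent, seen_non_inherent) {
            // An inherent behind a transaction is the panic case above.
            (true, true) => return Err(i as u32),
            (true, false) => num_inherents += 1,
            (false, _) => seen_non_inherent = true,
        }
    }
    Ok(num_inherents)
}

fn main() {
    assert_eq!(ensure_inherents_are_first(&[true, true, false]), Ok(2));
    assert_eq!(ensure_inherents_are_first(&[false, false]), Ok(0));
    assert_eq!(ensure_inherents_are_first(&[true, false, true]), Err(2));
}
```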
pub fn finalize_block() -> frame_system::pallet_prelude::HeaderFor {
sp_io::init_tracing();
sp_tracing::enter_span!(sp_tracing::Level::TRACE, "finalize_block");
- >::note_finished_extrinsics();
- let block_number = >::block_number();
- Self::idle_and_finalize_hook(block_number);
+ // In this case there were no transactions to trigger this state transition:
+ if !>::inherents_applied() {
+ Self::inherents_applied();
+ }
+
+ >::note_finished_extrinsics();
+ ::PostTransactions::post_transactions();
+
+ let block_number = >::block_number();
+ Self::on_idle_hook(block_number);
+ Self::on_finalize_hook(block_number);
>::finalize()
}

- fn idle_and_finalize_hook(block_number: NumberFor) {
+ /// Run the `on_idle` hook of all pallets, but only if there is weight remaining and there are no
+ /// ongoing MBMs.
+ fn on_idle_hook(block_number: NumberFor) {
+ if ::MultiBlockMigrator::ongoing() {
+ return
+ }
+
let weight = >::block_weight();
let max_weight = >::get().max_block;
let remaining_weight = max_weight.saturating_sub(weight.total());
@@ -613,7 +736,33 @@
DispatchClass::Mandatory,
);
}
+ }
+
+ fn on_poll_hook(block_number: NumberFor) {
+ defensive_assert!(
+ !::MultiBlockMigrator::ongoing(),
+ "on_poll should not be called during migrations"
+ );
+ let weight = >::block_weight();
+ let max_weight = >::get().max_block;
+ let remaining = max_weight.saturating_sub(weight.total());
+
+ if remaining.all_gt(Weight::zero()) {
+ let mut meter = WeightMeter::with_limit(remaining);
+ >>::on_poll(
+ block_number,
+ &mut meter,
+ );
+ >::register_extra_weight_unchecked(
+ meter.consumed(),
+ DispatchClass::Mandatory,
+ );
+ }
+ }
+
+ /// Run the `on_finalize` hook of all pallets.
+ fn on_finalize_hook(block_number: NumberFor) {
>>::on_finalize(block_number);
}

@@ -627,8 +776,18 @@
let encoded_len = encoded.len();
sp_tracing::enter_span!(sp_tracing::info_span!("apply_extrinsic", ext=?sp_core::hexdisplay::HexDisplay::from(&encoded)));
+
+ // We use the dedicated `is_inherent` check here, since just relying on `Mandatory` dispatch
+ // class does not capture optional inherents.
+ let is_inherent = System::is_inherent(&uxt);
+
// Verify that the signature is good.
let xt = uxt.check(&Default::default())?;
+ let dispatch_info = xt.get_dispatch_info();
+
+ if !is_inherent && !>::inherents_applied() {
+ Self::inherents_applied();
+ }

// We don't need to make sure to `note_extrinsic` only after we know it's going to be
// executed to prevent it from leaking in storage since at this point, it will either
@@ -637,8 +796,6 @@

// AUDIT: Under no circumstances may this function panic from here onwards.

- // Decode parameters and dispatch
- let dispatch_info = xt.get_dispatch_info();
let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?;

// Mandatory(inherents) are not allowed to fail.
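`on_idle_hook` and `on_poll_hook` above share one budgeting pattern: skip the hook entirely when no block weight remains, otherwise hand the remainder to the hook and register whatever it consumed as `DispatchClass::Mandatory`. A stripped-down sketch of that pattern, using a one-dimensional stand-in for `frame_support::weights::WeightMeter` (the real `Weight` also has a `proof_size` dimension):

```rust
/// One-dimensional stand-in for `WeightMeter`: tracks `ref_time` only.
struct WeightMeter {
    limit: u64,
    consumed: u64,
}

impl WeightMeter {
    fn with_limit(limit: u64) -> Self {
        Self { limit, consumed: 0 }
    }

    /// Consume up to the limit; the real meter exposes `try_consume` etc.
    fn consume(&mut self, w: u64) {
        self.consumed = (self.consumed + w).min(self.limit);
    }
}

/// Mirror of the guard in `on_poll_hook`: poll only when weight remains,
/// and report what the hook actually used so it can be registered.
fn poll_budget(max_block: u64, already_used: u64, hook_cost: u64) -> Option<u64> {
    let remaining = max_block.saturating_sub(already_used);
    if remaining == 0 {
        return None // nothing left in the block: `on_poll` is skipped
    }
    let mut meter = WeightMeter::with_limit(remaining);
    meter.consume(hook_cost);
    Some(meter.consumed) // registered as `Mandatory` weight
}

fn main() {
    assert_eq!(poll_budget(1_000, 400, 200), Some(200));
    assert_eq!(poll_budget(1_000, 1_000, 200), None);
    // The meter caps consumption at the remaining block weight:
    assert_eq!(poll_budget(1_000, 900, 500), Some(100));
}
```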
@@ -745,956 +902,3 @@ where ) } } - -#[cfg(test)] -mod tests { - use super::*; - - use sp_core::H256; - use sp_runtime::{ - generic::{DigestItem, Era}, - testing::{Block, Digest, Header}, - traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, IdentityLookup}, - transaction_validity::{ - InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, - }, - BuildStorage, DispatchError, - }; - - use frame_support::{ - assert_err, derive_impl, parameter_types, - traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, - weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, - }; - use frame_system::{ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; - use pallet_balances::Call as BalancesCall; - use pallet_transaction_payment::CurrencyAdapter; - - const TEST_KEY: &[u8] = b":test:key:"; - - #[frame_support::pallet(dev_mode)] - mod custom { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::pallet] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::hooks] - impl Hooks> for Pallet { - // module hooks. - // one with block number arg and one without - fn on_initialize(n: BlockNumberFor) -> Weight { - println!("on_initialize({})", n); - Weight::from_parts(175, 0) - } - - fn on_idle(n: BlockNumberFor, remaining_weight: Weight) -> Weight { - println!("on_idle{}, {})", n, remaining_weight); - Weight::from_parts(175, 0) - } - - fn on_finalize(n: BlockNumberFor) { - println!("on_finalize({})", n); - } - - fn on_runtime_upgrade() -> Weight { - sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - Weight::from_parts(200, 0) - } - - fn offchain_worker(n: BlockNumberFor) { - assert_eq!(BlockNumberFor::::from(1u32), n); - } - } - - #[pallet::call] - impl Pallet { - pub fn some_function(origin: OriginFor) -> DispatchResult { - // NOTE: does not make any different. - frame_system::ensure_signed(origin)?; - Ok(()) - } - - #[pallet::weight((200, DispatchClass::Operational))] - pub fn some_root_operation(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { - frame_system::ensure_none(origin)?; - Ok(()) - } - - pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - #[pallet::weight((0, DispatchClass::Mandatory))] - pub fn inherent_call(origin: OriginFor) -> DispatchResult { - frame_system::ensure_none(origin)?; - Ok(()) - } - - pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { - let root = sp_io::storage::root(sp_runtime::StateVersion::V1); - sp_io::storage::set("storage_root".as_bytes(), &root); - Ok(()) - } - } - - #[pallet::inherent] - impl ProvideInherent for Pallet { - type Call = Call; - - type Error = sp_inherents::MakeFatalError<()>; - - const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; - - fn create_inherent(_data: &InherentData) -> Option { - None - } - - fn is_inherent(call: &Self::Call) -> bool { - *call == Call::::inherent_call {} - } - } - - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { - type Call = Call; - - // Inherent call is accepted for being dispatched - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - match call { - Call::allowed_unsigned { .. 
} => Ok(()), - Call::inherent_call { .. } => Ok(()), - _ => Err(UnknownTransaction::NoUnsignedValidator.into()), - } - } - - // Inherent call is not validated as unsigned - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - match call { - Call::allowed_unsigned { .. } => Ok(Default::default()), - _ => UnknownTransaction::NoUnsignedValidator.into(), - } - } - } - } - - frame_support::construct_runtime!( - pub enum Runtime { - System: frame_system, - Balances: pallet_balances, - TransactionPayment: pallet_transaction_payment, - Custom: custom, - } - ); - - parameter_types! { - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::builder() - .base_block(Weight::from_parts(10, 0)) - .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_parts(5, 0)) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_parts(1024, u64::MAX).into()) - .build_or_panic(); - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 10, - write: 100, - }; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = TestBlock; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = RuntimeVersion; - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; - } - - type Balance = u64; - impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; - type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - } - - parameter_types! { - pub const TransactionByteFee: Balance = 0; - } - impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = CurrencyAdapter; - type OperationalFeeMultiplier = ConstU8<5>; - type WeightToFee = IdentityFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = (); - } - impl custom::Config for Runtime {} - - pub struct RuntimeVersion; - impl frame_support::traits::Get for RuntimeVersion { - fn get() -> sp_version::RuntimeVersion { - RuntimeVersionTestValues::get().clone() - } - } - - parameter_types! { - pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = - Default::default(); - } - - type SignedExtra = ( - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - ); - type TestXt = sp_runtime::testing::TestXt; - type TestBlock = Block; - - // Will contain `true` when the custom runtime logic was called. 
- const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; - - struct CustomOnRuntimeUpgrade; - impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { - fn on_runtime_upgrade() -> Weight { - sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); - sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); - System::deposit_event(frame_system::Event::CodeUpdated); - - assert_eq!(0, System::last_runtime_upgrade_spec_version()); - - Weight::from_parts(100, 0) - } - } - - type Executive = super::Executive< - Runtime, - Block, - ChainContext, - Runtime, - AllPalletsWithSystem, - CustomOnRuntimeUpgrade, - >; - - fn extra(nonce: u64, fee: Balance) -> SignedExtra { - ( - frame_system::CheckEra::from(Era::Immortal), - frame_system::CheckNonce::from(nonce), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(fee), - ) - } - - fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { - Some((who, extra(nonce, fee))) - } - - fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) - } - - #[test] - fn balance_transfer_dispatch_works() { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } - .assimilate_storage(&mut t) - .unwrap(); - let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + - ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - let fee: Balance = - ::WeightToFee::weight_to_fee(&weight); - let mut t = sp_io::TestExternalities::new(t); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - let r = Executive::apply_extrinsic(xt); - assert!(r.is_ok()); - assert_eq!(>::total_balance(&1), 142 - fee); - assert_eq!(>::total_balance(&2), 69); - }); - } - - fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } - .assimilate_storage(&mut t) - .unwrap(); - t.into() - } - - fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } - .assimilate_storage(&mut t) - .unwrap(); - (t, sp_runtime::StateVersion::V0).into() - } - - #[test] - fn block_import_works() { - block_import_works_inner( - new_test_ext_v0(1), - array_bytes::hex_n_into_unchecked( - "65e953676859e7a33245908af7ad3637d6861eb90416d433d485e95e2dd174a1", - ), - ); - block_import_works_inner( - new_test_ext(1), - array_bytes::hex_n_into_unchecked( - "5a19b3d6fdb7241836349fdcbe2d9df4d4f945b949d979e31ad50bff1cbcd1c2", - ), - ); - } - fn block_import_works_inner(mut ext: sp_io::TestExternalities, state_root: H256) { - ext.execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root, - extrinsics_root: array_bytes::hex_n_into_unchecked( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_state_root_fails() { - new_test_ext(1).execute_with(|| { - 
Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: [0u8; 32].into(), - extrinsics_root: array_bytes::hex_n_into_unchecked( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_extrinsic_root_fails() { - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: array_bytes::hex_n_into_unchecked( - "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", - ), - extrinsics_root: [0u8; 32].into(), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - fn bad_extrinsic_not_inserted() { - let mut t = new_test_ext(1); - // bad nonce check! - let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - assert_err!( - Executive::apply_extrinsic(xt), - TransactionValidityError::Invalid(InvalidTransaction::Future) - ); - assert_eq!(>::extrinsic_index(), Some(0)); - }); - } - - #[test] - fn block_weight_limit_enforced() { - let mut t = new_test_ext(10000); - // given: TestXt uses the encoded len as fixed Len: - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let encoded = xt.encode(); - let encoded_len = encoded.len() as u64; - // on_initialize weight + base block execution weight - let block_weights = ::BlockWeights::get(); - let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; - let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - // Base block execution weight + `on_initialize` weight from the custom module. 
- assert_eq!(>::block_weight().total(), base_block_weight); - - for nonce in 0..=num_to_exhaust_block { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { - dest: 33, - value: 0, - }), - sign_extra(1, nonce.into(), 0), - ); - let res = Executive::apply_extrinsic(xt); - if nonce != num_to_exhaust_block { - assert!(res.is_ok()); - assert_eq!( - >::block_weight().total(), - //--------------------- on_initialize + block_execution + extrinsic_base weight - Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, - ); - assert_eq!( - >::extrinsic_index(), - Some(nonce as u32 + 1) - ); - } else { - assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); - } - } - }); - } - - #[test] - fn block_weight_and_size_is_stored_per_tx() { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let x1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 1, 0), - ); - let x2 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 2, 0), - ); - let len = xt.clone().encode().len() as u32; - let mut t = new_test_ext(1); - t.execute_with(|| { - // Block execution weight + on_initialize weight from custom module - let base_block_weight = Weight::from_parts(175, 0) + - ::BlockWeights::get().base_block; - - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(>::block_weight().total(), base_block_weight); - assert_eq!(>::all_extrinsics_len(), 0); - - assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); - assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); - assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); - - // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = Weight::from_parts(len as u64, 0) + - ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - assert_eq!( - >::block_weight().total(), - base_block_weight + 3u64 * extrinsic_weight, - ); - assert_eq!(>::all_extrinsics_len(), 3 * len); - - let _ = >::finalize(); - // All extrinsics length cleaned on `System::finalize` - assert_eq!(>::all_extrinsics_len(), 0); - - // New Block - Executive::initialize_block(&Header::new( - 2, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - // Block weight cleaned up on `System::initialize` - assert_eq!(>::block_weight().total(), base_block_weight); - }); - } - - #[test] - fn validate_unsigned() { - let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); - let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); - let mut t = new_test_ext(1); - - t.execute_with(|| { - assert_eq!( - Executive::validate_transaction( - TransactionSource::InBlock, - valid.clone(), - Default::default(), - ), - Ok(ValidTransaction::default()), - ); - assert_eq!( - Executive::validate_transaction( - TransactionSource::InBlock, - invalid.clone(), - Default::default(), - ), - Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)), - ); - assert_eq!(Executive::apply_extrinsic(valid), Ok(Err(DispatchError::BadOrigin))); - assert_eq!( - Executive::apply_extrinsic(invalid), - Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)) - ); - }); - } - - #[test] - fn can_not_pay_for_tx_fee_on_full_lock() { - let mut t = new_test_ext(1); - t.execute_with(|| { - as fungible::MutateFreeze>::set_freeze( - &(), - &1, - 110, - ) - .unwrap(); - let xt = TestXt::new( - RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), - sign_extra(1, 0, 0), - ); - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()),); - assert_eq!(>::total_balance(&1), 111); - }); - } - - #[test] - fn block_hooks_weight_is_stored() { - new_test_ext(1).execute_with(|| { - Executive::initialize_block(&Header::new_from_number(1)); - Executive::finalize_block(); - // NOTE: might need updates over time if new weights are introduced. - // For now it only accounts for the base block execution weight and - // the `on_initialize` weight defined in the custom test module. 
- assert_eq!( - >::block_weight().total(), - Weight::from_parts(175 + 175 + 10, 0) - ); - }) - } - - #[test] - fn runtime_upgraded_should_work() { - new_test_ext(1).execute_with(|| { - RuntimeVersionTestValues::mutate(|v| *v = Default::default()); - // It should be added at genesis - assert!(LastRuntimeUpgrade::::exists()); - assert!(!Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - assert!(Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), - ..Default::default() - } - }); - assert!(Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { - spec_version: 0, - impl_version: 2, - ..Default::default() - } - }); - assert!(!Executive::runtime_upgraded()); - - LastRuntimeUpgrade::::take(); - assert!(Executive::runtime_upgraded()); - }) - } - - #[test] - fn last_runtime_upgrade_was_upgraded_works() { - let test_data = vec![ - (0, "", 1, "", true), - (1, "", 1, "", false), - (1, "", 1, "test", true), - (1, "", 0, "", false), - (1, "", 0, "test", true), - ]; - - for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data { - let current = sp_version::RuntimeVersion { - spec_version: c_spec_version, - spec_name: c_spec_name.into(), - ..Default::default() - }; - - let last = LastRuntimeUpgradeInfo { - spec_version: spec_version.into(), - spec_name: spec_name.into(), - }; - - assert_eq!(result, last.was_upgraded(¤t)); - } - } - - #[test] - fn custom_runtime_upgrade_is_called_before_modules() { - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); - assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); - assert_eq!( - Some(RuntimeVersionTestValues::get().into()), - LastRuntimeUpgrade::::get(), - ) - }); - } - - #[test] - fn event_from_runtime_upgrade_is_included() { - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - // set block number to non zero so events are not excluded - System::set_block_number(1); - - Executive::initialize_block(&Header::new( - 2, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - System::assert_last_event(frame_system::Event::::CodeUpdated.into()); - }); - } - - /// Regression test that ensures that the custom on runtime upgrade is called when executive is - /// used through the `ExecuteBlock` trait. - #[test] - fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - - let header = new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - // Let's build some fake block. 
- Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - // Reset to get the correct new genesis below. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } - }); - - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - >>::execute_block(Block::new(header, vec![xt])); - - assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); - assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); - }); - } - - #[test] - fn all_weights_are_recorded_correctly() { - // Reset to get the correct new genesis below. - RuntimeVersionTestValues::take(); - - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called for maximum complexity - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - let block_number = 1; - - Executive::initialize_block(&Header::new( - block_number, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` - // succeed. - LastRuntimeUpgrade::::take(); - - // All weights that show up in the `initialize_block_impl` - let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); - let runtime_upgrade_weight = - ::on_runtime_upgrade(); - let on_initialize_weight = - >::on_initialize(block_number); - let base_block_weight = - ::BlockWeights::get().base_block; - - // Weights are recorded correctly - assert_eq!( - frame_system::Pallet::::block_weight().total(), - custom_runtime_upgrade_weight + - runtime_upgrade_weight + - on_initialize_weight + base_block_weight, - ); - }); - } - - #[test] - fn offchain_worker_works_as_expected() { - new_test_ext(1).execute_with(|| { - let parent_hash = sp_core::H256::from([69u8; 32]); - let mut digest = Digest::default(); - digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8])); - - let header = - Header::new(1, H256::default(), H256::default(), parent_hash, digest.clone()); - - Executive::offchain_worker(&header); - - assert_eq!(digest, System::digest()); - assert_eq!(parent_hash, System::block_hash(0)); - assert_eq!(header.hash(), System::block_hash(1)); - }); - } - - #[test] - fn calculating_storage_root_twice_works() { - let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); - let xt = TestXt::new(call, sign_extra(1, 0, 0)); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. 
- Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt])); - }); - } - - #[test] - #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] - fn invalid_inherent_position_fail() { - let xt1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); - Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt1, xt2])); - }); - } - - #[test] - fn valid_inherents_position_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); - Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt1, xt2])); - }); - } - - #[test] - #[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] - fn invalid_inherents_fail_block_execution() { - let xt1 = - TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), sign_extra(1, 0, 0)); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new( - Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - ), - vec![xt1], - )); - }); - } - - // Inherents are created by the runtime and don't need to be validated. - #[test] - fn inherents_fail_validate_block() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - - new_test_ext(1).execute_with(|| { - assert_eq!( - Executive::validate_transaction(TransactionSource::External, xt1, H256::random()) - .unwrap_err(), - InvalidTransaction::MandatoryValidation.into() - ); - }) - } -} diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..70b55f6e85537246398b10f0880ec14681194b68 --- /dev/null +++ b/substrate/frame/executive/src/tests.rs @@ -0,0 +1,1389 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test the `frame-executive` crate. + +use super::*; + +use sp_core::H256; +use sp_runtime::{ + generic::{DigestItem, Era}, + testing::{Block, Digest, Header}, + traits::{Block as BlockT, Header as HeaderT}, + transaction_validity::{ + InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, + }, + BuildStorage, DispatchError, +}; + +use frame_support::{ + assert_err, assert_ok, derive_impl, + migrations::MultiStepMigrator, + pallet_prelude::*, + parameter_types, + traits::{fungible, ConstU8, Currency, IsInherent}, + weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightMeter, WeightToFee}, +}; +use frame_system::{pallet_prelude::*, ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; +use pallet_balances::Call as BalancesCall; +use pallet_transaction_payment::CurrencyAdapter; + +const TEST_KEY: &[u8] = b":test:key:"; + +#[frame_support::pallet(dev_mode)] +mod custom { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::hooks] + impl Hooks> for Pallet { + // module hooks. + // one with block number arg and one without + fn on_initialize(_: BlockNumberFor) -> Weight { + Weight::from_parts(175, 0) + } + + fn on_idle(_: BlockNumberFor, _: Weight) -> Weight { + Weight::from_parts(175, 0) + } + + fn on_poll(_: BlockNumberFor, _: &mut WeightMeter) {} + + fn on_finalize(_: BlockNumberFor) {} + + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); + Weight::from_parts(200, 0) + } + + fn offchain_worker(n: BlockNumberFor) { + assert_eq!(BlockNumberFor::::from(1u32), n); + } + } + + #[pallet::call] + impl Pallet { + pub fn some_function(origin: OriginFor) -> DispatchResult { + // NOTE: does not make any difference. 
+ frame_system::ensure_signed(origin)?; + Ok(()) + } + + #[pallet::weight((200, DispatchClass::Operational))] + pub fn some_root_operation(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + Ok(()) + } + + pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn inherent(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + Ok(()) + } + + pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { + let root = sp_io::storage::root(sp_runtime::StateVersion::V1); + sp_io::storage::set("storage_root".as_bytes(), &root); + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + + type Error = sp_inherents::MakeFatalError<()>; + + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; + + fn create_inherent(_data: &InherentData) -> Option { + None + } + + fn is_inherent(call: &Self::Call) -> bool { + *call == Call::::inherent {} + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + // Inherent call is accepted for being dispatched + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned { .. } => Ok(()), + Call::inherent { .. } => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } + } + + // Inherent call is not validated as unsigned + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + match call { + Call::allowed_unsigned { .. } => Ok(Default::default()), + _ => UnknownTransaction::NoUnsignedValidator.into(), + } + } + } +} + +#[frame_support::pallet(dev_mode)] +mod custom2 { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::hooks] + impl Hooks> for Pallet { + // module hooks. 
+ // one with block number arg and one without
+ fn on_initialize(_: BlockNumberFor) -> Weight {
+ assert!(
+ !MockedSystemCallbacks::pre_inherent_called(),
+ "Pre inherent hook goes after on_initialize"
+ );
+
+ Weight::from_parts(0, 0)
+ }
+
+ fn on_idle(_: BlockNumberFor, _: Weight) -> Weight {
+ assert!(
+ MockedSystemCallbacks::post_transactions_called(),
+ "Post transactions hook goes before on_idle"
+ );
+ Weight::from_parts(0, 0)
+ }
+
+ fn on_finalize(_: BlockNumberFor) {
+ assert!(
+ MockedSystemCallbacks::post_transactions_called(),
+ "Post transactions hook goes before on_finalize"
+ );
+ }
+
+ fn on_runtime_upgrade() -> Weight {
+ sp_io::storage::set(super::TEST_KEY, "module".as_bytes());
+ Weight::from_parts(0, 0)
+ }
+ }
+
+ #[pallet::call]
+ impl Pallet {
+ pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult {
+ frame_system::ensure_root(origin)?;
+ Ok(())
+ }
+
+ pub fn some_call(_: OriginFor) -> DispatchResult {
+ assert!(MockedSystemCallbacks::post_inherent_called());
+ assert!(!MockedSystemCallbacks::post_transactions_called());
+ assert!(System::inherents_applied());
+
+ Ok(())
+ }
+
+ #[pallet::weight({0})]
+ pub fn optional_inherent(origin: OriginFor) -> DispatchResult {
+ frame_system::ensure_none(origin)?;
+
+ assert!(MockedSystemCallbacks::pre_inherent_called());
+ assert!(!MockedSystemCallbacks::post_inherent_called(), "Should not already be called");
+ assert!(!System::inherents_applied());
+
+ Ok(())
+ }
+
+ #[pallet::weight((0, DispatchClass::Mandatory))]
+ pub fn inherent(origin: OriginFor) -> DispatchResult {
+ frame_system::ensure_none(origin)?;
+
+ assert!(MockedSystemCallbacks::pre_inherent_called());
+ assert!(!MockedSystemCallbacks::post_inherent_called(), "Should not already be called");
+ assert!(!System::inherents_applied());
+
+ Ok(())
+ }
+ }
+
+ #[pallet::inherent]
+ impl ProvideInherent for Pallet {
+ type Call = Call;
+
+ type Error = sp_inherents::MakeFatalError<()>;
+
+ const INHERENT_IDENTIFIER: [u8; 8] = *b"test1235";
+
+ fn create_inherent(_data: &InherentData) -> Option {
+ None
+ }
+
+ fn is_inherent(call: &Self::Call) -> bool {
+ matches!(call, Call::::inherent {} | Call::::optional_inherent {})
+ }
+ }
+
+ #[pallet::validate_unsigned]
+ impl ValidateUnsigned for Pallet {
+ type Call = Call;
+
+ // Inherent call is accepted for being dispatched
+ fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
+ match call {
+ Call::allowed_unsigned { .. } |
+ Call::optional_inherent { .. } |
+ Call::inherent { .. } => Ok(()),
+ _ => Err(UnknownTransaction::NoUnsignedValidator.into()),
+ }
+ }
+
+ // Inherent call is not validated as unsigned
+ fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
+ match call {
+ Call::allowed_unsigned { .. } => Ok(Default::default()),
+ _ => UnknownTransaction::NoUnsignedValidator.into(),
+ }
+ }
+ }
+}
+
+frame_support::construct_runtime!(
+ pub struct Runtime
+ {
+ System: frame_system::{Pallet, Call, Config, Storage, Event},
+ Balances: pallet_balances::{Pallet, Call, Storage, Config, Event},
+ TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event},
+ Custom: custom::{Pallet, Call, ValidateUnsigned, Inherent},
+ Custom2: custom2::{Pallet, Call, ValidateUnsigned, Inherent},
+ }
+);
+
+parameter_types!
{ + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::builder() + .base_block(Weight::from_parts(10, 0)) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_parts(5, 0)) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_parts(1024, u64::MAX).into()) + .build_or_panic(); + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 10, + write: 100, + }; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type BlockWeights = BlockWeights; + type RuntimeOrigin = RuntimeOrigin; + type Nonce = u64; + type RuntimeCall = RuntimeCall; + type Block = TestBlock; + type RuntimeEvent = RuntimeEvent; + type Version = RuntimeVersion; + type AccountData = pallet_balances::AccountData; + type PreInherents = MockedSystemCallbacks; + type PostInherents = MockedSystemCallbacks; + type PostTransactions = MockedSystemCallbacks; + type MultiBlockMigrator = MockedModeGetter; +} + +type Balance = u64; + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type AccountStore = System; +} + +parameter_types! { + pub const TransactionByteFee: Balance = 0; +} +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = CurrencyAdapter; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightToFee = IdentityFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = (); +} + +impl custom::Config for Runtime {} +impl custom2::Config for Runtime {} + +pub struct RuntimeVersion; +impl frame_support::traits::Get for RuntimeVersion { + fn get() -> sp_version::RuntimeVersion { + RuntimeVersionTestValues::get().clone() + } +} + +parameter_types! { + pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = + Default::default(); +} + +type SignedExtra = ( + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); +type TestXt = sp_runtime::testing::TestXt; +type TestBlock = Block; + +// Will contain `true` when the custom runtime logic was called. +const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; + +struct CustomOnRuntimeUpgrade; +impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); + sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); + System::deposit_event(frame_system::Event::CodeUpdated); + + assert_eq!(0, System::last_runtime_upgrade_spec_version()); + + Weight::from_parts(100, 0) + } +} + +type Executive = super::Executive< + Runtime, + Block, + ChainContext, + Runtime, + AllPalletsWithSystem, + CustomOnRuntimeUpgrade, +>; + +parameter_types! 
{ + pub static SystemCallbacksCalled: u32 = 0; +} + +pub struct MockedSystemCallbacks; +impl PreInherents for MockedSystemCallbacks { + fn pre_inherents() { + assert_eq!(SystemCallbacksCalled::get(), 0); + SystemCallbacksCalled::set(1); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":pre_inherent", b"0"); + } +} + +impl PostInherents for MockedSystemCallbacks { + fn post_inherents() { + assert_eq!(SystemCallbacksCalled::get(), 1); + SystemCallbacksCalled::set(2); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":post_inherent", b"0"); + } +} + +impl PostTransactions for MockedSystemCallbacks { + fn post_transactions() { + assert_eq!(SystemCallbacksCalled::get(), 2); + SystemCallbacksCalled::set(3); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":post_transaction", b"0"); + } +} + +impl MockedSystemCallbacks { + fn pre_inherent_called() -> bool { + SystemCallbacksCalled::get() >= 1 + } + + fn post_inherent_called() -> bool { + SystemCallbacksCalled::get() >= 2 + } + + fn post_transactions_called() -> bool { + SystemCallbacksCalled::get() >= 3 + } + + fn reset() { + SystemCallbacksCalled::set(0); + frame_support::storage::unhashed::kill(b":pre_inherent"); + frame_support::storage::unhashed::kill(b":post_inherent"); + frame_support::storage::unhashed::kill(b":post_transaction"); + } +} + +parameter_types! { + pub static MbmActive: bool = false; +} + +pub struct MockedModeGetter; +impl MultiStepMigrator for MockedModeGetter { + fn ongoing() -> bool { + MbmActive::get() + } + + fn step() -> Weight { + Weight::zero() + } +} + +fn extra(nonce: u64, fee: Balance) -> SignedExtra { + ( + frame_system::CheckEra::from(Era::Immortal), + frame_system::CheckNonce::from(nonce), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(fee), + ) +} + +fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { + Some((who, extra(nonce, fee))) +} + +fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) +} + +#[test] +fn balance_transfer_dispatch_works() { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } + .assimilate_storage(&mut t) + .unwrap(); + let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let fee: Balance = + ::WeightToFee::weight_to_fee(&weight); + let mut t = sp_io::TestExternalities::new(t); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + let r = Executive::apply_extrinsic(xt); + assert!(r.is_ok()); + assert_eq!(>::total_balance(&1), 142 - fee); + assert_eq!(>::total_balance(&2), 69); + }); +} + +fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| { + SystemCallbacksCalled::set(0); + }); + ext +} + +fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + 
pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); + (t, sp_runtime::StateVersion::V0).into() +} + +#[test] +fn block_import_works() { + block_import_works_inner( + new_test_ext_v0(1), + array_bytes::hex_n_into_unchecked( + "4826d3bdf87dbbc883d2ab274cbe272f58ed94a904619b59953e48294d1142d2", + ), + ); + block_import_works_inner( + new_test_ext(1), + array_bytes::hex_n_into_unchecked( + "d6b465f5a50c9f8d5a6edc0f01d285a6b19030f097d3aaf1649b7be81649f118", + ), + ); +} +fn block_import_works_inner(mut ext: sp_io::TestExternalities, state_root: H256) { + ext.execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root, + extrinsics_root: array_bytes::hex_n_into_unchecked( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +#[should_panic] +fn block_import_of_bad_state_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: [0u8; 32].into(), + extrinsics_root: array_bytes::hex_n_into_unchecked( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +#[should_panic] +fn block_import_of_bad_extrinsic_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: array_bytes::hex_n_into_unchecked( + "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", + ), + extrinsics_root: [0u8; 32].into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +fn bad_extrinsic_not_inserted() { + let mut t = new_test_ext(1); + // bad nonce check! + let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + assert_err!( + Executive::apply_extrinsic(xt), + TransactionValidityError::Invalid(InvalidTransaction::Future) + ); + assert_eq!(>::extrinsic_index(), Some(0)); + }); +} + +#[test] +fn block_weight_limit_enforced() { + let mut t = new_test_ext(10000); + // given: TestXt uses the encoded len as fixed Len: + let xt = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + let encoded = xt.encode(); + let encoded_len = encoded.len() as u64; + // on_initialize weight + base block execution weight + let block_weights = ::BlockWeights::get(); + let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; + let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; + let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + // Base block execution weight + `on_initialize` weight from the custom module. 
+ assert_eq!(>::block_weight().total(), base_block_weight); + + for nonce in 0..=num_to_exhaust_block { + let xt = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, nonce.into(), 0), + ); + let res = Executive::apply_extrinsic(xt); + if nonce != num_to_exhaust_block { + assert!(res.is_ok()); + assert_eq!( + >::block_weight().total(), + //--------------------- on_initialize + block_execution + extrinsic_base weight + Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, + ); + assert_eq!( + >::extrinsic_index(), + Some(nonce as u32 + 1) + ); + } else { + assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); + } + } + }); +} + +#[test] +fn block_weight_and_size_is_stored_per_tx() { + let xt = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + let x1 = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 1, 0), + ); + let x2 = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 2, 0), + ); + let len = xt.clone().encode().len() as u32; + let mut t = new_test_ext(1); + t.execute_with(|| { + // Block execution weight + on_initialize weight from custom module + let base_block_weight = Weight::from_parts(175, 0) + + ::BlockWeights::get().base_block; + + Executive::initialize_block(&Header::new_from_number(1)); + + assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::all_extrinsics_len(), 0); + + assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); + + // default weight for `TestXt` == encoded length. + let extrinsic_weight = Weight::from_parts(len as u64, 0) + + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + assert_eq!( + >::block_weight().total(), + base_block_weight + 3u64 * extrinsic_weight, + ); + assert_eq!(>::all_extrinsics_len(), 3 * len); + + let _ = >::finalize(); + // All extrinsics length cleaned on `System::finalize` + assert_eq!(>::all_extrinsics_len(), 0); + + // Reset to a new block. + SystemCallbacksCalled::take(); + Executive::initialize_block(&Header::new_from_number(2)); + + // Block weight cleaned up on `System::initialize` + assert_eq!(>::block_weight().total(), base_block_weight); + }); +} + +#[test] +fn validate_unsigned() { + let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); + let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); + let mut t = new_test_ext(1); + + t.execute_with(|| { + assert_eq!( + Executive::validate_transaction( + TransactionSource::InBlock, + valid.clone(), + Default::default(), + ), + Ok(ValidTransaction::default()), + ); + assert_eq!( + Executive::validate_transaction( + TransactionSource::InBlock, + invalid.clone(), + Default::default(), + ), + Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)), + ); + // Need to initialize the block before applying extrinsics for the `MockedSystemCallbacks` + // check. 
+ Executive::initialize_block(&Header::new_from_number(1)); + assert_eq!(Executive::apply_extrinsic(valid), Ok(Err(DispatchError::BadOrigin))); + assert_eq!( + Executive::apply_extrinsic(invalid), + Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)) + ); + }); +} + +#[test] +fn can_not_pay_for_tx_fee_on_full_lock() { + let mut t = new_test_ext(1); + t.execute_with(|| { + as fungible::MutateFreeze>::set_freeze(&(), &1, 110) + .unwrap(); + let xt = TestXt::new( + RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), + sign_extra(1, 0, 0), + ); + Executive::initialize_block(&Header::new_from_number(1)); + + assert_eq!(Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()),); + assert_eq!(>::total_balance(&1), 111); + }); +} + +#[test] +fn block_hooks_weight_is_stored() { + new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + Executive::finalize_block(); + // NOTE: might need updates over time if new weights are introduced. + // For now it only accounts for the base block execution weight and + // the `on_initialize` weight defined in the custom test module. + assert_eq!( + >::block_weight().total(), + Weight::from_parts(175 + 175 + 10, 0) + ); + }) +} + +#[test] +fn runtime_upgraded_should_work() { + new_test_ext(1).execute_with(|| { + RuntimeVersionTestValues::mutate(|v| *v = Default::default()); + // It should be added at genesis + assert!(LastRuntimeUpgrade::::exists()); + assert!(!Executive::runtime_upgraded()); + + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + assert!(Executive::runtime_upgraded()); + + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { + spec_version: 1, + spec_name: "test".into(), + ..Default::default() + } + }); + assert!(Executive::runtime_upgraded()); + + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { + spec_version: 0, + impl_version: 2, + ..Default::default() + } + }); + assert!(!Executive::runtime_upgraded()); + + LastRuntimeUpgrade::::take(); + assert!(Executive::runtime_upgraded()); + }) +} + +#[test] +fn last_runtime_upgrade_was_upgraded_works() { + let test_data = vec![ + (0, "", 1, "", true), + (1, "", 1, "", false), + (1, "", 1, "test", true), + (1, "", 0, "", false), + (1, "", 0, "test", true), + ]; + + for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data { + let current = sp_version::RuntimeVersion { + spec_version: c_spec_version, + spec_name: c_spec_name.into(), + ..Default::default() + }; + + let last = LastRuntimeUpgradeInfo { + spec_version: spec_version.into(), + spec_name: spec_name.into(), + }; + + assert_eq!(result, last.was_upgraded(¤t)); + } +} + +#[test] +fn custom_runtime_upgrade_is_called_before_modules() { + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + Executive::initialize_block(&Header::new_from_number(1)); + + assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); + assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + assert_eq!( + Some(RuntimeVersionTestValues::get().into()), + LastRuntimeUpgrade::::get(), + ) + }); +} + +#[test] +fn event_from_runtime_upgrade_is_included() { + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. 
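+ // (Bumping `spec_version` past the last recorded version makes `Executive::runtime_upgraded()` report true, as exercised in the tests above.)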
+ RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + // set block number to non zero so events are not excluded + System::set_block_number(1); + + Executive::initialize_block(&Header::new_from_number(2)); + System::assert_last_event(frame_system::Event::::CodeUpdated.into()); + }); +} + +/// Regression test that ensures that the custom on runtime upgrade is called when executive is +/// used through the `ExecuteBlock` trait. +#[test] +fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { + let xt = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + + let header = new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + // Reset to get the correct new genesis below. + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } + }); + + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + >>::execute_block(Block::new(header, vec![xt])); + + assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); + assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + }); +} + +#[test] +fn all_weights_are_recorded_correctly() { + // Reset to get the correct new genesis below. + RuntimeVersionTestValues::take(); + + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called for maximum complexity + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + let block_number = 1; + + Executive::initialize_block(&Header::new_from_number(block_number)); + + // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` + // succeed. 
+ LastRuntimeUpgrade::::take(); + MockedSystemCallbacks::reset(); + + // All weights that show up in the `initialize_block_impl` + let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); + let runtime_upgrade_weight = + ::on_runtime_upgrade(); + let on_initialize_weight = + >::on_initialize(block_number); + let base_block_weight = ::BlockWeights::get().base_block; + + // Weights are recorded correctly + assert_eq!( + frame_system::Pallet::::block_weight().total(), + custom_runtime_upgrade_weight + + runtime_upgrade_weight + + on_initialize_weight + + base_block_weight, + ); + }); +} + +#[test] +fn offchain_worker_works_as_expected() { + new_test_ext(1).execute_with(|| { + let parent_hash = sp_core::H256::from([69u8; 32]); + let mut digest = Digest::default(); + digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8])); + + let header = Header::new(1, H256::default(), H256::default(), parent_hash, digest.clone()); + + Executive::offchain_worker(&header); + + assert_eq!(digest, System::digest()); + assert_eq!(parent_hash, System::block_hash(0)); + assert_eq!(header.hash(), System::block_hash(1)); + }); +} + +#[test] +fn calculating_storage_root_twice_works() { + let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); + let xt = TestXt::new(call, sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt])); + }); +} + +#[test] +#[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] +fn invalid_inherent_position_fail() { + let xt1 = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +fn valid_inherents_position_works() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. 
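+ // (Inherent first, then the signed transfer: the ordering the import check accepts, unlike the previous test.)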
+ Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +#[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] +fn invalid_inherents_fail_block_execution() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), sign_extra(1, 0, 0)); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new( + Header::new(1, H256::default(), H256::default(), [69u8; 32].into(), Digest::default()), + vec![xt1], + )); + }); +} + +// Inherents are created by the runtime and don't need to be validated. +#[test] +fn inherents_fail_validate_block() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + + new_test_ext(1).execute_with(|| { + assert_eq!( + Executive::validate_transaction(TransactionSource::External, xt1, H256::random()) + .unwrap_err(), + InvalidTransaction::MandatoryValidation.into() + ); + }) +} + +/// Inherents still work while `initialize_block` forbids transactions. +#[test] +fn inherents_ok_while_exts_forbidden_works() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + // This is not applied: + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + // Tell `initialize_block` to forbid extrinsics: + Executive::execute_block(Block::new(header, vec![xt1])); + }); +} + +/// Refuses to import blocks with transactions during `OnlyInherents` mode. +#[test] +#[should_panic = "Only inherents are allowed in this block"] +fn transactions_in_only_inherents_block_errors() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + MbmActive::set(true); + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +/// Same as above but no error. 
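+/// (`MbmActive` stays unset here, so the signed transaction is accepted on import.)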
+#[test] +fn transactions_in_normal_block_works() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + // Tell `initialize_block` to forbid extrinsics: + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +#[cfg(feature = "try-runtime")] +fn try_execute_block_works() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header, vec![xt1, xt2]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + }); +} + +/// Same as `extrinsic_while_exts_forbidden_errors` but using the try-runtime function. +#[test] +#[cfg(feature = "try-runtime")] +#[should_panic = "Only inherents allowed"] +fn try_execute_tx_forbidden_errors() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + MbmActive::set(true); + Executive::try_execute_block( + Block::new(header, vec![xt1, xt2]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + }); +} + +/// Check that `ensure_inherents_are_first` reports the correct indices. 
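+/// (`Ok(n)` means the first `n` extrinsics are inherents; `Err(i)` flags the first inherent that appears after a non-inherent, at index `i`.)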
+#[test] +fn ensure_inherents_are_first_works() { + let in1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let in2 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + // Mocked empty header: + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + assert_ok!(Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![]),), 0); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![xt2.clone()]),), + 0 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![in1.clone()])), + 1 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![in1.clone(), xt2.clone()] + ),), + 1 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![in2.clone(), in1.clone(), xt2.clone()] + ),), + 2 + ); + + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), in1.clone()] + ),), + Err(1) + ); + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), xt2.clone(), in1.clone()] + ),), + Err(2) + ); + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), xt2.clone(), xt2.clone(), in2.clone()] + ),), + Err(3) + ); + }); +} + +/// Check that block execution rejects blocks with transactions in them while MBMs are active and +/// also that all the system callbacks are called correctly. +#[test] +fn callbacks_in_block_execution_works() { + callbacks_in_block_execution_works_inner(false); + callbacks_in_block_execution_works_inner(true); +} + +fn callbacks_in_block_execution_works_inner(mbms_active: bool) { + MbmActive::set(mbms_active); + + for (n_in, n_tx) in (0..10usize).zip(0..10usize) { + let mut extrinsics = Vec::new(); + + let header = new_test_ext(10).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::initialize_block(&Header::new_from_number(1)); + assert_eq!(SystemCallbacksCalled::get(), 1); + + for i in 0..n_in { + let xt = if i % 2 == 0 { + TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None) + } else { + TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None) + }; + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + extrinsics.push(xt); + } + + for t in 0..n_tx { + let xt = TestXt::new( + RuntimeCall::Custom2(custom2::Call::some_call {}), + sign_extra(1, t as u64, 0), + ); + // Extrinsics can be applied even when MBMs are active. Only the `execute_block` + // will reject it. 
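+ // (Block authors are trusted to exclude them themselves; the import path then enforces this by panicking.)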
+ Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + extrinsics.push(xt); + } + + Executive::finalize_block() + }); + assert_eq!(SystemCallbacksCalled::get(), 3); + + new_test_ext(10).execute_with(|| { + let header = std::panic::catch_unwind(|| { + Executive::execute_block(Block::new(header, extrinsics)); + }); + + match header { + Err(e) => { + let err = e.downcast::<&str>().unwrap(); + assert_eq!(*err, "Only inherents are allowed in this block"); + assert!( + MbmActive::get() && n_tx > 0, + "Transactions should be rejected when MBMs are active" + ); + }, + Ok(_) => { + assert_eq!(SystemCallbacksCalled::get(), 3); + assert!( + !MbmActive::get() || n_tx == 0, + "MBMs should be deactivated after finalization" + ); + }, + } + }); + } +} + +#[test] +fn post_inherent_called_after_all_inherents() { + let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(in1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + #[cfg(feature = "try-runtime")] + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header.clone(), vec![in1.clone(), xt1.clone()]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); + + new_test_ext(1).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::execute_block(Block::new(header, vec![in1, xt1])); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); +} + +/// Regression test for AppSec finding #40. +#[test] +fn post_inherent_called_after_all_optional_inherents() { + let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); + let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. 
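+ // (Same flow as the previous test, but the block's only inherent is an `optional_inherent`.)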
+ Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(in1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + #[cfg(feature = "try-runtime")] + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header.clone(), vec![in1.clone(), xt1.clone()]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); + + new_test_ext(1).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::execute_block(Block::new(header, vec![in1, xt1])); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); +} + +#[test] +fn is_inherent_works() { + let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + assert!(Runtime::is_inherent(&ext)); + let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); + assert!(Runtime::is_inherent(&ext)); + + let ext = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + assert!(!Runtime::is_inherent(&ext)); + + let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {}), None); + assert!(!Runtime::is_inherent(&ext), "Unsigned ext are not automatically inherents"); +} diff --git a/substrate/frame/fast-unstake/src/migrations.rs b/substrate/frame/fast-unstake/src/migrations.rs index 564388407045e0ae86b87c7823f3f07c5613e012..97ad86bfff42bf6ee1e258f5663d9bb53144373c 100644 --- a/substrate/frame/fast-unstake/src/migrations.rs +++ b/substrate/frame/fast-unstake/src/migrations.rs @@ -33,12 +33,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 0b9f2b3582792e9d93695d34656912a8d70a131f..90bcd8721dfa1f6d091edc34da883c2172d9f6ca 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -73,7 +73,7 @@ pub mod pallet { use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
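+ /// (The version that this code declares, as opposed to the `on_chain` version recorded in storage.)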
const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); #[pallet::pallet] diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs index fe2fb0b048933c13425c0127b752bf773e00bea2..e0b45eeecd1cf5494f52f83ec835037075155ea7 100644 --- a/substrate/frame/identity/src/benchmarking.rs +++ b/substrate/frame/identity/src/benchmarking.rs @@ -23,9 +23,7 @@ use super::*; use crate::Pallet as Identity; use codec::Encode; -use frame_benchmarking::{ - account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, -}; +use frame_benchmarking::{account, v2::*, whitelisted_caller, BenchmarkError}; use frame_support::{ assert_ok, ensure, traits::{EnsureOrigin, Get, OnFinalize, OnInitialize}, diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index 1de89dd00c8121c64a19f8b45f24fc6500126dce..239b47834d1f8b1625a186e4a8f90e9861bfa303 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -251,7 +251,7 @@ type OffchainResult = Result>>; pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] @@ -338,26 +338,22 @@ pub mod pallet { /// progress estimate from `NextSessionRotation`, as those estimates should be /// more accurate then the value we calculate for `HeartbeatAfter`. #[pallet::storage] - #[pallet::getter(fn heartbeat_after)] - pub(super) type HeartbeatAfter = StorageValue<_, BlockNumberFor, ValueQuery>; + pub type HeartbeatAfter = StorageValue<_, BlockNumberFor, ValueQuery>; /// The current set of keys that may issue a heartbeat. #[pallet::storage] - #[pallet::getter(fn keys)] - pub(super) type Keys = + pub type Keys = StorageValue<_, WeakBoundedVec, ValueQuery>; /// For each session index, we keep a mapping of `SessionIndex` and `AuthIndex`. #[pallet::storage] - #[pallet::getter(fn received_heartbeats)] - pub(super) type ReceivedHeartbeats = + pub type ReceivedHeartbeats = StorageDoubleMap<_, Twox64Concat, SessionIndex, Twox64Concat, AuthIndex, bool>; /// For each session index, we keep a mapping of `ValidatorId` to the /// number of blocks authored by the given authority. 
#[pallet::storage] - #[pallet::getter(fn authored_blocks)] - pub(super) type AuthoredBlocks = StorageDoubleMap< + pub type AuthoredBlocks = StorageDoubleMap< _, Twox64Concat, SessionIndex, diff --git a/substrate/frame/im-online/src/migration.rs b/substrate/frame/im-online/src/migration.rs index 0d2c0a055b6d88855bc8783b65e06f0427197208..754a2e672e6cfaec32ed0e44c9af54469fddbb04 100644 --- a/substrate/frame/im-online/src/migration.rs +++ b/substrate/frame/im-online/src/migration.rs @@ -72,7 +72,7 @@ pub mod v1 { if StorageVersion::get::>() != 0 { log::warn!( target: TARGET, - "Skipping migration because current storage version is not 0" + "Skipping migration because in-code storage version is not 0" ); return weight } diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 79036760c2d427c2895163b150f3806bf17222f2..5e5212e1d562cbf331027a1206c3cc0b91be42de 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -26,10 +26,7 @@ use sp_core::offchain::{ testing::{TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }; -use sp_runtime::{ - testing::UintAuthorityId, - transaction_validity::{InvalidTransaction, TransactionValidityError}, -}; +use sp_runtime::testing::UintAuthorityId; #[test] fn test_unresponsiveness_slash_fraction() { @@ -267,14 +264,14 @@ fn should_cleanup_received_heartbeats_on_session_end() { let _ = heartbeat(1, 2, 0, 1.into(), Session::validators()).unwrap(); // the heartbeat is stored - assert!(!ImOnline::received_heartbeats(&2, &0).is_none()); + assert!(!super::pallet::ReceivedHeartbeats::::get(2, 0).is_none()); advance_session(); // after the session has ended we have already processed the heartbeat // message, so any messages received on the previous session should have // been pruned. - assert!(ImOnline::received_heartbeats(&2, &0).is_none()); + assert!(super::pallet::ReceivedHeartbeats::::get(2, 0).is_none()); }); } diff --git a/substrate/frame/lottery/src/benchmarking.rs b/substrate/frame/lottery/src/benchmarking.rs index 1510d250dbeaf6814a35f1262f08ba727b340ee2..123b425b976f3b5b2842e748498513611dca5bc2 100644 --- a/substrate/frame/lottery/src/benchmarking.rs +++ b/substrate/frame/lottery/src/benchmarking.rs @@ -23,7 +23,6 @@ use super::*; use crate::Pallet as Lottery; use frame_benchmarking::{ - impl_benchmark_test_suite, v1::{account, whitelisted_caller, BenchmarkError}, v2::*, }; diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index 19b5e54d308cb7a1546136ff1266de67ec6d80fa..f4ac697130de87ea48fdb6be522289c6368d10c8 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -27,7 +27,7 @@ use frame_support::{ traits::{ChangeMembers, Contains, Get, InitializeMembers, SortedMembers}, BoundedVec, }; -use sp_runtime::traits::StaticLookup; +use sp_runtime::traits::{StaticLookup, UniqueSaturatedInto}; use sp_std::prelude::*; pub mod migrations; @@ -46,7 +46,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] @@ -163,12 +163,16 @@ pub mod pallet { /// /// May only be called from `T::AddOrigin`. 
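+ /// Weighted against `T::MaxMembers` up front; the difference is refunded post-dispatch based on the actual number of members.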
#[pallet::call_index(0)] - #[pallet::weight({50_000_000})] - pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + #[pallet::weight(T::WeightInfo::add_member(T::MaxMembers::get()))] + pub fn add_member( + origin: OriginFor, + who: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { T::AddOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; let mut members = >::get(); + let init_length = members.len(); let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; members .try_insert(location, who.clone()) @@ -179,19 +183,24 @@ pub mod pallet { T::MembershipChanged::change_members_sorted(&[who], &[], &members[..]); Self::deposit_event(Event::MemberAdded); - Ok(()) + + Ok(Some(T::WeightInfo::add_member(init_length as u32)).into()) } /// Remove a member `who` from the set. /// /// May only be called from `T::RemoveOrigin`. #[pallet::call_index(1)] - #[pallet::weight({50_000_000})] - pub fn remove_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + #[pallet::weight(T::WeightInfo::remove_member(T::MaxMembers::get()))] + pub fn remove_member( + origin: OriginFor, + who: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { T::RemoveOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; let mut members = >::get(); + let init_length = members.len(); let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; members.remove(location); @@ -201,7 +210,7 @@ pub mod pallet { Self::rejig_prime(&members); Self::deposit_event(Event::MemberRemoved); - Ok(()) + Ok(Some(T::WeightInfo::remove_member(init_length as u32)).into()) } /// Swap out one member `remove` for another `add`. @@ -210,18 +219,18 @@ pub mod pallet { /// /// Prime membership is *not* passed from `remove` to `add`, if extant. #[pallet::call_index(2)] - #[pallet::weight({50_000_000})] + #[pallet::weight(T::WeightInfo::swap_member(T::MaxMembers::get()))] pub fn swap_member( origin: OriginFor, remove: AccountIdLookupOf, add: AccountIdLookupOf, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { T::SwapOrigin::ensure_origin(origin)?; let remove = T::Lookup::lookup(remove)?; let add = T::Lookup::lookup(add)?; if remove == add { - return Ok(()) + return Ok(().into()); } let mut members = >::get(); @@ -236,7 +245,7 @@ pub mod pallet { Self::rejig_prime(&members); Self::deposit_event(Event::MembersSwapped); - Ok(()) + Ok(Some(T::WeightInfo::swap_member(members.len() as u32)).into()) } /// Change the membership to a new set, disregarding the existing membership. Be nice and @@ -244,7 +253,7 @@ pub mod pallet { /// /// May only be called from `T::ResetOrigin`. #[pallet::call_index(3)] - #[pallet::weight({50_000_000})] + #[pallet::weight(T::WeightInfo::reset_members(members.len().unique_saturated_into()))] pub fn reset_members(origin: OriginFor, members: Vec) -> DispatchResult { T::ResetOrigin::ensure_origin(origin)?; @@ -267,56 +276,65 @@ pub mod pallet { /// /// Prime membership is passed from the origin account to `new`, if extant. 
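+ /// As with the other calls, the weight is charged at the `T::MaxMembers` bound and corrected to the real membership size post-dispatch.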
#[pallet::call_index(4)] - #[pallet::weight({50_000_000})] - pub fn change_key(origin: OriginFor, new: AccountIdLookupOf) -> DispatchResult { + #[pallet::weight(T::WeightInfo::change_key(T::MaxMembers::get()))] + pub fn change_key( + origin: OriginFor, + new: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { let remove = ensure_signed(origin)?; let new = T::Lookup::lookup(new)?; - if remove != new { - let mut members = >::get(); - let location = - members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; - let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; - members[location] = new.clone(); - members.sort(); - - >::put(&members); - - T::MembershipChanged::change_members_sorted( - &[new.clone()], - &[remove.clone()], - &members[..], - ); - - if Prime::::get() == Some(remove) { - Prime::::put(&new); - T::MembershipChanged::set_prime(Some(new)); - } + if remove == new { + return Ok(().into()); + } + + let mut members = >::get(); + let members_length = members.len() as u32; + let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; + let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; + members[location] = new.clone(); + members.sort(); + + >::put(&members); + + T::MembershipChanged::change_members_sorted( + &[new.clone()], + &[remove.clone()], + &members[..], + ); + + if Prime::::get() == Some(remove) { + Prime::::put(&new); + T::MembershipChanged::set_prime(Some(new)); } Self::deposit_event(Event::KeyChanged); - Ok(()) + Ok(Some(T::WeightInfo::change_key(members_length)).into()) } /// Set the prime member. Must be a current member. /// /// May only be called from `T::PrimeOrigin`. #[pallet::call_index(5)] - #[pallet::weight({50_000_000})] - pub fn set_prime(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + #[pallet::weight(T::WeightInfo::set_prime(T::MaxMembers::get()))] + pub fn set_prime( + origin: OriginFor, + who: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { T::PrimeOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; - Self::members().binary_search(&who).ok().ok_or(Error::::NotMember)?; + let members = Self::members(); + members.binary_search(&who).ok().ok_or(Error::::NotMember)?; Prime::::put(&who); T::MembershipChanged::set_prime(Some(who)); - Ok(()) + Ok(Some(T::WeightInfo::set_prime(members.len() as u32)).into()) } /// Remove the prime member if it exists. /// /// May only be called from `T::PrimeOrigin`. #[pallet::call_index(6)] - #[pallet::weight({50_000_000})] + #[pallet::weight(T::WeightInfo::clear_prime())] pub fn clear_prime(origin: OriginFor) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; Prime::::kill(); @@ -442,7 +460,7 @@ mod benchmark { } // er keep the prime common between incoming and outgoing to make sure it is rejigged. - reset_member { + reset_members { let m in 1 .. T::MaxMembers::get(); let members = (1..m+1).map(|i| account("member", i, SEED)).collect::>(); @@ -500,8 +518,7 @@ mod benchmark { } clear_prime { - let m in 1 .. 
T::MaxMembers::get(); - let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + let members = (0..T::MaxMembers::get()).map(|i| account("member", i, SEED)).collect::>(); let prime = members.last().cloned().unwrap(); set_members::(members, None); }: { @@ -526,7 +543,8 @@ mod tests { use sp_runtime::{bounded_vec, traits::BadOrigin, BuildStorage}; use frame_support::{ - assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, + assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, + parameter_types, traits::{ConstU32, StorageVersion}, }; use frame_system::EnsureSignedBy; @@ -716,6 +734,17 @@ mod tests { }); } + #[test] + fn swap_member_with_identical_arguments_changes_nothing() { + new_test_ext().execute_with(|| { + assert_storage_noop!(assert_ok!(Membership::swap_member( + RuntimeOrigin::signed(3), + 10, + 10 + ))); + }); + } + #[test] fn change_key_works() { new_test_ext().execute_with(|| { @@ -745,6 +774,13 @@ mod tests { }); } + #[test] + fn change_key_with_same_caller_as_argument_changes_nothing() { + new_test_ext().execute_with(|| { + assert_storage_noop!(assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 10))); + }); + } + #[test] fn reset_members_works() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/membership/src/weights.rs b/substrate/frame/membership/src/weights.rs index 18ea7fcb315a3134747743f1a913856d084e5ad6..2d18848b89ab5b55c22235282fb78b514f7ed937 100644 --- a/substrate/frame/membership/src/weights.rs +++ b/substrate/frame/membership/src/weights.rs @@ -55,10 +55,10 @@ pub trait WeightInfo { fn add_member(m: u32, ) -> Weight; fn remove_member(m: u32, ) -> Weight; fn swap_member(m: u32, ) -> Weight; - fn reset_member(m: u32, ) -> Weight; + fn reset_members(m: u32, ) -> Weight; fn change_key(m: u32, ) -> Weight; fn set_prime(m: u32, ) -> Weight; - fn clear_prime(m: u32, ) -> Weight; + fn clear_prime() -> Weight; } /// Weights for pallet_membership using the Substrate node and recommended hardware. @@ -142,7 +142,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `m` is `[1, 100]`. - fn reset_member(m: u32, ) -> Weight { + fn reset_members(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` @@ -200,15 +200,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn clear_prime(m: u32, ) -> Weight { + fn clear_prime() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 3_373_000 picoseconds. Weight::from_parts(3_750_452, 0) - // Standard Error: 142 - .saturating_add(Weight::from_parts(505, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -293,7 +290,7 @@ impl WeightInfo for () { /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `m` is `[1, 100]`. 
- fn reset_member(m: u32, ) -> Weight { + fn reset_members(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` @@ -351,15 +348,12 @@ impl WeightInfo for () { /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn clear_prime(m: u32, ) -> Weight { + fn clear_prime() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 3_373_000 picoseconds. Weight::from_parts(3_750_452, 0) - // Standard Error: 142 - .saturating_add(Weight::from_parts(505, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index 104660a98866a5d624889e95bffa61ad44bc1df7..8d9da7df39ea58ad1f91d45e5db1bc026031ed89 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } environmental = { version = "1.1.4", default-features = false } diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index cc3da6ebdc669e5710867aee63252a4bf5ca6889..ce8eb80805ab0401bf7f4ff4e2448983333c7b36 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -330,6 +330,11 @@ fn process_some_messages(num_msgs: u32) { ServiceWeight::set(Some(weight)); let consumed = next_block(); + for origin in BookStateFor::::iter_keys() { + let fp = MessageQueue::footprint(origin); + assert_eq!(fp.pages, fp.ready_pages); + } + assert_eq!(consumed, weight, "\n{}", MessageQueue::debug_info()); assert_eq!(NumMessagesProcessed::take(), num_msgs as usize); } diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 07eb0041985342522a113339769b98f834ef4a17..61028057394f6f3bedda173288fa1c9aea3e9e9a 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -208,8 +208,9 @@ use frame_support::{ defensive, pallet_prelude::*, traits::{ - Defensive, DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, - ProcessMessage, ProcessMessageError, QueueFootprint, QueuePausedQuery, ServiceQueues, + Defensive, DefensiveSaturating, DefensiveTruncateFrom, EnqueueMessage, + ExecuteOverweightError, Footprint, ProcessMessage, ProcessMessageError, QueueFootprint, + QueuePausedQuery, ServiceQueues, }, BoundedSlice, CloneNoBound, DefaultNoBound, }; @@ -442,6 +443,7 @@ impl From> for QueueFootprint { fn from(book: BookState) -> Self { QueueFootprint { pages: book.count, + ready_pages: book.end.defensive_saturating_sub(book.begin), storage: Footprint { count: book.message_count, size: book.size }, } } @@ -1281,6 +1283,9 @@ impl Pallet { ensure!(book.message_count < 1 << 30, 
"Likely overflow or corruption"); ensure!(book.size < 1 << 30, "Likely overflow or corruption"); ensure!(book.count < 1 << 30, "Likely overflow or corruption"); + + let fp: QueueFootprint = book.into(); + ensure!(fp.ready_pages <= fp.pages, "There cannot be more ready than total pages"); } //loop around this origin diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index a46fa31df3e20d302650e68a26d8bcb96e5496a2..d176a981ca8ee2f6911176b1a25907c7bc197566 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -355,8 +355,8 @@ pub fn num_overweight_enqueued_events() -> u32 { .count() as u32 } -pub fn fp(pages: u32, count: u64, size: u64) -> QueueFootprint { - QueueFootprint { storage: Footprint { count, size }, pages } +pub fn fp(pages: u32, ready_pages: u32, count: u64, size: u64) -> QueueFootprint { + QueueFootprint { storage: Footprint { count, size }, pages, ready_pages } } /// A random seed that can be overwritten with `MQ_SEED`. diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index 86a8b79fe8bd01fc5f906ef42e22339c669e57b5..dbef94b3b10354a8b503b795bd46bf90010bb80f 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -1064,13 +1064,13 @@ fn footprint_num_pages_works() { MessageQueue::enqueue_message(msg("weight=2"), Here); MessageQueue::enqueue_message(msg("weight=3"), Here); - assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 16)); + assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 2, 16)); // Mark the messages as overweight. assert_eq!(MessageQueue::service_queues(1.into_weight()), 0.into_weight()); assert_eq!(System::events().len(), 2); - // Overweight does not change the footprint. - assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 16)); + // `ready_pages` decreases but `page` count does not. + assert_eq!(MessageQueue::footprint(Here), fp(2, 0, 2, 16)); // Now execute the second message. assert_eq!( @@ -1078,7 +1078,7 @@ fn footprint_num_pages_works() { .unwrap(), 3.into_weight() ); - assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 8)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 1, 8)); // And the first one: assert_eq!( ::execute_overweight(2.into_weight(), (Here, 0, 0)) @@ -1086,6 +1086,11 @@ fn footprint_num_pages_works() { 2.into_weight() ); assert_eq!(MessageQueue::footprint(Here), Default::default()); + assert_eq!(MessageQueue::footprint(Here), fp(0, 0, 0, 0)); + + // `ready_pages` and normal `pages` increases again: + MessageQueue::enqueue_message(msg("weight=3"), Here); + assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 1, 8)); }) } diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2914aa9ea973eafc3eee5419b3b8d8562b23c4f4 --- /dev/null +++ b/substrate/frame/migrations/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "pallet-migrations" +version = "1.0.0" +description = "FRAME pallet to execute multi-block migrations." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +docify = "0.1.14" +impl-trait-for-tuples = "0.2.2" +log = "0.4.18" +scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } + +frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } +frame-support = { default-features = false, path = "../support" } +frame-system = { default-features = false, path = "../system" } +sp-core = { path = "../../primitives/core", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } + +[dev-dependencies] +frame-executive = { path = "../executive" } +sp-api = { path = "../../primitives/api", features = ["std"] } +sp-block-builder = { path = "../../primitives/block-builder", features = ["std"] } +sp-io = { path = "../../primitives/io", features = ["std"] } +sp-tracing = { path = "../../primitives/tracing", features = ["std"] } +sp-version = { path = "../../primitives/version", features = ["std"] } + +pretty_assertions = "1.3.0" + +[features] +default = ["std"] + +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] + +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/migrations/src/benchmarking.rs b/substrate/frame/migrations/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..8ad1fa50d14985842051c9ac6fb3f95e30bdb030 --- /dev/null +++ b/substrate/frame/migrations/src/benchmarking.rs @@ -0,0 +1,222 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_benchmarking::{v2::*, BenchmarkError}; +use frame_system::{Pallet as System, RawOrigin}; +use sp_runtime::traits::One; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +#[benchmarks] +mod benches { + use super::*; + use frame_support::traits::Hooks; + + #[benchmark] + fn onboard_new_mbms() { + T::Migrations::set_fail_after(0); // Should not be called anyway. 
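+ // (A fail-after-zero migration acts as a canary: if onboarding actually stepped a migration, the benchmark would error out.)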
+ assert!(!Cursor::::exists()); + + #[block] + { + Pallet::::onboard_new_mbms(); + } + + assert_last_event::(Event::UpgradeStarted { migrations: 1 }.into()); + } + + #[benchmark] + fn progress_mbms_none() { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert!(!Cursor::::exists()); + + #[block] + { + Pallet::::progress_mbms(One::one()); + } + } + + /// All migrations completed. + #[benchmark] + fn exec_migration_completed() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 1, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::UpgradeCompleted {}.into()); + + Ok(()) + } + + /// No migration runs since it is skipped as historic. + #[benchmark] + fn exec_migration_skipped_historic() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + + let id: IdentifierOf = T::Migrations::nth_id(0).unwrap().try_into().unwrap(); + Historic::::insert(id, ()); + + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationSkipped { index: 0 }.into()); + + Ok(()) + } + + /// Advance a migration by one step. + #[benchmark] + fn exec_migration_advance() -> Result<(), BenchmarkError> { + T::Migrations::set_success_after(1); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationAdvanced { index: 0, took: One::one() }.into()); + + Ok(()) + } + + /// Successfully complete a migration. + #[benchmark] + fn exec_migration_complete() -> Result<(), BenchmarkError> { + T::Migrations::set_success_after(0); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationCompleted { index: 0, took: One::one() }.into()); + + Ok(()) + } + + #[benchmark] + fn exec_migration_fail() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::UpgradeFailed {}.into()); + + Ok(()) + } + + #[benchmark] + fn on_init_loop() { + T::Migrations::set_fail_after(0); // Should not be called anyway. 
+ System::::set_block_number(1u32.into()); + Pallet::::on_runtime_upgrade(); + + #[block] + { + Pallet::::on_initialize(1u32.into()); + } + } + + #[benchmark] + fn force_set_cursor() { + #[extrinsic_call] + _(RawOrigin::Root, Some(cursor::())); + } + + #[benchmark] + fn force_set_active_cursor() { + #[extrinsic_call] + _(RawOrigin::Root, 0, None, None); + } + + #[benchmark] + fn force_onboard_mbms() { + #[extrinsic_call] + _(RawOrigin::Root); + } + + #[benchmark] + fn clear_historic(n: Linear<0, { DEFAULT_HISTORIC_BATCH_CLEAR_SIZE * 2 }>) { + let id_max_len = ::IdentifierMaxLen::get(); + assert!(id_max_len >= 4, "Precondition violated"); + + for i in 0..DEFAULT_HISTORIC_BATCH_CLEAR_SIZE * 2 { + let id = IdentifierOf::::truncate_from( + i.encode().into_iter().cycle().take(id_max_len as usize).collect::>(), + ); + + Historic::::insert(&id, ()); + } + + #[extrinsic_call] + _( + RawOrigin::Root, + HistoricCleanupSelector::Wildcard { limit: n.into(), previous_cursor: None }, + ); + } + + fn cursor() -> CursorOf { + // Note: The weight of a function can depend on the weight of reading the `inner_cursor`. + // `Cursor` is a user provided type. Now instead of requiring something like `Cursor: + // From`, we instead rely on the fact that it is MEL and the PoV benchmarking will + // therefore already take the MEL bound, even when the cursor in storage is `None`. + MigrationCursor::Active(ActiveCursor { + index: u32::MAX, + inner_cursor: None, + started_at: 0u32.into(), + }) + } + + // Implements a test for each benchmark. Execute with: + // `cargo test -p pallet-migrations --features runtime-benchmarks`. + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..cd57d89f440f79d39f98a3da81ed57b6cb5bfcf5 --- /dev/null +++ b/substrate/frame/migrations/src/lib.rs @@ -0,0 +1,746 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![deny(missing_docs)] +#![deny(rustdoc::broken_intra_doc_links)] + +//! # `pallet-migrations` +//! +//! Provides multi block migrations for FRAME runtimes. +//! +//! ## Overview +//! +//! The pallet takes care of executing a batch of multi-step migrations over multiple blocks. The +//! process starts on each runtime upgrade. Normal and operational transactions are paused while +//! migrations are on-going. +//! +//! ### Example +//! +//! This example demonstrates a simple mocked walk through of a basic success scenario. The pallet +//! is configured with two migrations: one succeeding after just one step, and the second one +//! succeeding after two steps. A runtime upgrade is then enacted and the block number is advanced +//! until all migrations finish executing. Afterwards, the recorded historic migrations are +//! checked and events are asserted. 
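+//!
+//! In a real runtime, each entry of [`Config::Migrations`] is a type implementing
+//! [`SteppedMigration`]. As a rough sketch (not part of this crate; the storage walk and the
+//! `migrate_one` helper are invented purely for illustration), such a migration could look like:
+//! ```ignore
+//! pub struct LazyMigrationV2<T>(core::marker::PhantomData<T>);
+//! impl<T: Config> SteppedMigration for LazyMigrationV2<T> {
+//! 	type Cursor = u32;
+//! 	type Identifier = [u8; 8];
+//!
+//! 	fn id() -> Self::Identifier {
+//! 		*b"lazymig2"
+//! 	}
+//!
+//! 	fn max_steps() -> Option<u32> {
+//! 		Some(10_000)
+//! 	}
+//!
+//! 	fn step(
+//! 		cursor: Option<Self::Cursor>,
+//! 		meter: &mut WeightMeter,
+//! 	) -> Result<Option<Self::Cursor>, SteppedMigrationError> {
+//! 		let mut index = cursor.unwrap_or(0);
+//! 		// Migrate one item per iteration until the meter is exhausted.
+//! 		while meter.try_consume(T::DbWeight::get().reads_writes(1, 1)).is_ok() {
+//! 			if !migrate_one::<T>(index) {
+//! 				return Ok(None); // `None` signals that the migration completed.
+//! 			}
+//! 			index += 1;
+//! 		}
+//! 		Ok(Some(index)) // Out of weight: resume from `index` in the next block.
+//! 	}
+//! }
+//! ```
+//! The walkthrough below uses mocked migrations instead: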
+#![doc = docify::embed!("substrate/frame/migrations/src/tests.rs", simple_works)]
+//!
+//! ## Pallet API
+//!
+//! See the [`pallet`] module for more information about the interfaces this pallet exposes,
+//! including its configuration trait, dispatchables, storage items, events and errors.
+//!
+//! Other noteworthy API of this pallet includes its implementation of the
+//! [`MultiStepMigrator`] trait. This must be plugged into
+//! [`frame_system::Config::MultiBlockMigrator`] for proper function.
+//!
+//! The API contains some calls for emergency management. They are all prefixed with `force_` and
+//! should normally not be needed. Pay special attention prior to using them.
+//!
+//! ### Design Goals
+//!
+//! 1. Must automatically execute migrations over multiple blocks.
+//! 2. Must expose information about whether migrations are ongoing.
+//! 3. Must respect pessimistic weight bounds of migrations.
+//! 4. Must execute migrations in order. Skipping is not allowed; migrations are run on an
+//!    all-or-nothing basis.
+//! 5. Must prevent re-execution of past migrations.
+//! 6. Must provide transactional storage semantics for migrations.
+//! 7. Must guarantee progress.
+//!
+//! ### Design
+//!
+//! Migrations are provided to the pallet through the associated type [`Config::Migrations`] of
+//! type [`SteppedMigrations`]. This allows multiple migrations to be aggregated through a tuple.
+//! It simplifies the trait bounds, since all associated types of the trait must be provided by
+//! the pallet. The actual progress of the pallet is stored in the [`Cursor`] storage item. This
+//! can either be [`MigrationCursor::Active`] or [`MigrationCursor::Stuck`]. In the active case it
+//! points to the currently active migration and stores its inner cursor. The inner cursor can
+//! then be used by the migration to store its inner state and advance. Each time the migration
+//! returns `Some(cursor)`, it signals the pallet that it is not done yet.
+//! The cursor is reset on each runtime upgrade. This ensures that it starts to execute at the
+//! first migration in the vector. The pallet's cursor is only ever incremented or set to `Stuck`
+//! once it encounters an error (Goal 4). Once in the stuck state, the pallet will stay stuck
+//! until it is fixed through manual governance intervention.
+//! As soon as the cursor of the pallet becomes `Some(_)`, [`MultiStepMigrator::ongoing`] returns
+//! `true` (Goal 2). This can be used by upstream code to possibly pause transactions.
+//! In `on_initialize` the pallet will load the current migration and check whether it was already
+//! executed in the past by checking for membership of its ID in the [`Historic`] set. Historic
+//! migrations are skipped without causing an error. Each successfully executed migration is added
+//! to this set (Goal 5).
+//! This proceeds until no more migrations remain. At that point, the event `UpgradeCompleted` is
+//! emitted (Goal 1).
+//! The execution of each migration happens by calling [`SteppedMigration::transactional_step`].
+//! This function wraps the inner `step` function into a transactional layer to allow rollback in
+//! the error case (Goal 6).
+//! Weight limits must be checked by the migration itself. The pallet provides a [`WeightMeter`]
+//! for that purpose. A migration may return [`SteppedMigrationError::InsufficientWeight`] at any
+//! point. In that scenario, one of two things will happen: if that migration was exclusively
+//! executed in this block, and therefore required more than the maximum amount of weight
+//! possible, the process becomes `Stuck`. Otherwise, one re-attempt is executed with the same
+//! logic in the next block (Goal 3). Progress through the migrations is guaranteed by providing a
+//! timeout for each migration via [`SteppedMigration::max_steps`]. The pallet **ONLY** guarantees
+//! progress if this is set to sensible limits (Goal 7).
+//!
+//! ### Scenario: Governance cleanup
+//!
+//! Every now and then, governance can make use of the [`clear_historic`][Pallet::clear_historic]
+//! call. This ensures that no old migrations pile up in the [`Historic`] set. This can be done
+//! very rarely, since the storage should not grow quickly and the lookup weight does not suffer
+//! much. Another possibility would be to have a synchronous single-block migration perpetually
+//! deployed that cleans them up before the MBMs start.
+//!
+//! ### Scenario: Successful upgrade
+//!
+//! The standard procedure for a successful runtime upgrade can look like this:
+//! 1. Migrations are configured in the `Migrations` config item. All migrations expose
+//!    [`max_steps`][SteppedMigration::max_steps], are error tolerant, check their weight bounds
+//!    and have a unique identifier.
+//! 2. The runtime upgrade is enacted. An `UpgradeStarted` event is followed by lots of
+//!    `MigrationAdvanced` and `MigrationCompleted` events. Finally, `UpgradeCompleted` is
+//!    emitted.
+//! 3. Cleanup, as described in the governance scenario, can be executed at any time after the
+//!    migrations have completed.
+//!
+//! ### Advice: Failed upgrades
+//!
+//! Failed upgrades cannot be recovered from automatically and require governance intervention.
+//! Set up monitoring for `UpgradeFailed` events to be made aware of any failures. The hook
+//! [`FailedMigrationHandler::failed`] should be set up in a way that allows governance to act,
+//! but still prevents other transactions from interacting with the inconsistent storage state.
+//! Note that this is paramount, since the inconsistent state might contain a faulty balance
+//! amount or similar that could cause great harm if user transactions don't remain suspended.
+//! One way to implement this would be to use the `SafeMode` or `TxPause` pallets, which can
+//! prevent most user interactions but still allow a whitelisted set of governance calls.
+//!
+//! ### Remark: Failed migrations
+//!
+//! Failed migrations are not added to the `Historic` set. This means that an erroneous migration
+//! must be removed and fixed manually. This already applies, even before considering the historic
+//! set.
+//!
+//! ### Remark: Transactional processing
+//!
+//! You can see the transactional semantics for migration steps as mostly useless, since in the
+//! stuck case the state is already messed up. This just prevents it from becoming even more
+//! messed up, but it doesn't prevent it in the first place.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+mod benchmarking;
+mod mock;
+pub mod mock_helpers;
+mod tests;
+pub mod weights;
+
+pub use pallet::*;
+pub use weights::WeightInfo;
+
+use codec::{Decode, Encode, MaxEncodedLen};
+use core::ops::ControlFlow;
+use frame_support::{
+	defensive, defensive_assert,
+	migrations::*,
+	traits::Get,
+	weights::{Weight, WeightMeter},
+	BoundedVec,
+};
+use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System};
+use sp_runtime::Saturating;
+use sp_std::vec::Vec;
+
+/// Points to the next migration to execute.
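+///
+/// Lifecycle, as described in the crate docs above: set to `Active` when a runtime upgrade
+/// starts, advanced (or its inner cursor updated) per migration step, flipped to `Stuck` on an
+/// unrecoverable error, and cleared again once all migrations have completed.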
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+mod benchmarking;
+mod mock;
+pub mod mock_helpers;
+mod tests;
+pub mod weights;
+
+pub use pallet::*;
+pub use weights::WeightInfo;
+
+use codec::{Decode, Encode, MaxEncodedLen};
+use core::ops::ControlFlow;
+use frame_support::{
+	defensive, defensive_assert,
+	migrations::*,
+	traits::Get,
+	weights::{Weight, WeightMeter},
+	BoundedVec,
+};
+use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System};
+use sp_runtime::Saturating;
+use sp_std::vec::Vec;
+
+/// Points to the next migration to execute.
+#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)]
+pub enum MigrationCursor<Cursor, BlockNumber> {
+	/// Points to the currently active migration and its inner cursor.
+	Active(ActiveCursor<Cursor, BlockNumber>),
+
+	/// Migration got stuck and cannot proceed. This is bad.
+	Stuck,
+}
+
+impl<Cursor, BlockNumber> MigrationCursor<Cursor, BlockNumber> {
+	/// Try to return self as an [`ActiveCursor`].
+	pub fn as_active(&self) -> Option<&ActiveCursor<Cursor, BlockNumber>> {
+		match self {
+			MigrationCursor::Active(active) => Some(active),
+			MigrationCursor::Stuck => None,
+		}
+	}
+}
+
+impl<Cursor, BlockNumber> From<ActiveCursor<Cursor, BlockNumber>>
+	for MigrationCursor<Cursor, BlockNumber>
+{
+	fn from(active: ActiveCursor<Cursor, BlockNumber>) -> Self {
+		MigrationCursor::Active(active)
+	}
+}
+
+/// Points to the currently active migration and its inner cursor.
+#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)]
+pub struct ActiveCursor<Cursor, BlockNumber> {
+	/// The index of the migration in the MBM tuple.
+	pub index: u32,
+	/// The cursor of the migration that is referenced by `index`.
+	pub inner_cursor: Option<Cursor>,
+	/// The block number that the migration started at.
+	///
+	/// This is used to calculate how many blocks it took.
+	pub started_at: BlockNumber,
+}
+
+impl<Cursor, BlockNumber> ActiveCursor<Cursor, BlockNumber> {
+	/// Advance the overarching cursor to the next migration.
+	pub(crate) fn goto_next_migration(&mut self, current_block: BlockNumber) {
+		self.index.saturating_inc();
+		self.inner_cursor = None;
+		self.started_at = current_block;
+	}
+}
+
+/// How to clear the records of historic migrations.
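+///
+/// A short illustrative sketch of the two selector variants; `id_a` and `id_b` are assumed to be
+/// previously-emitted migration identifiers:
+/// ```ignore
+/// // Clear two specific entries:
+/// let selector = HistoricCleanupSelector::Specific(vec![id_a, id_b]);
+/// // Or clear the whole set in batches, starting without a cursor:
+/// let selector = HistoricCleanupSelector::Wildcard { limit: None, previous_cursor: None };
+/// ```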
+#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo)]
+pub enum HistoricCleanupSelector<Id> {
+	/// Clear exactly these entries.
+	///
+	/// This is the advised way of doing it.
+	Specific(Vec<Id>),
+
+	/// Clear up to this many entries.
+	Wildcard {
+		/// How many should be cleared in this call at most.
+		limit: Option<u32>,
+		/// The cursor that was emitted from any previous `HistoricCleared`.
+		///
+		/// Does not need to be passed when clearing the first batch.
+		previous_cursor: Option<Vec<u8>>,
+	},
+}
+
+/// The default number of entries that should be cleared by a `HistoricCleanupSelector::Wildcard`.
+///
+/// The caller can explicitly specify a higher amount. Benchmarks are run with twice this value.
+const DEFAULT_HISTORIC_BATCH_CLEAR_SIZE: u32 = 128;
+
+impl<Id> HistoricCleanupSelector<Id> {
+	/// The maximal number of entries that this will remove.
+	///
+	/// Needed for weight calculation.
+	pub fn limit(&self) -> u32 {
+		match self {
+			Self::Specific(ids) => ids.len() as u32,
+			Self::Wildcard { limit, .. } => limit.unwrap_or(DEFAULT_HISTORIC_BATCH_CLEAR_SIZE),
+		}
+	}
+}
+
+/// Convenience alias for [`MigrationCursor`].
+pub type CursorOf<T> = MigrationCursor<RawCursorOf<T>, BlockNumberFor<T>>;
+
+/// Convenience alias for the raw inner cursor of a migration.
+pub type RawCursorOf<T> = BoundedVec<u8, <T as Config>::CursorMaxLen>;
+
+/// Convenience alias for the identifier of a migration.
+pub type IdentifierOf<T> = BoundedVec<u8, <T as Config>::IdentifierMaxLen>;
+
+/// Convenience alias for [`ActiveCursor`].
+pub type ActiveCursorOf<T> = ActiveCursor<RawCursorOf<T>, BlockNumberFor<T>>;
+
+/// Trait for a tuple of No-OP migrations with one element.
+pub trait MockedMigrations: SteppedMigrations {
+	/// The migration should fail after `n` steps.
+	fn set_fail_after(n: u32);
+	/// The migration should succeed after `n` steps.
+	fn set_success_after(n: u32);
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// The overarching event type of the runtime.
+		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
+		/// All the multi-block migrations to run.
+		///
+		/// Should only be updated in a runtime-upgrade once all the old migrations have completed.
+		/// (Check that [`Cursor`] is `None`).
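+		///
+		/// For example (a sketch; `MigrationA` and `MigrationB` are assumed user-defined
+		/// [`SteppedMigration`]s):
+		/// ```ignore
+		/// type Migrations = (MigrationA, MigrationB);
+		/// ```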
+		#[cfg(not(feature = "runtime-benchmarks"))]
+		type Migrations: SteppedMigrations;
+
+		/// Mocked migrations for benchmarking only.
+		///
+		/// Should be configured to [`crate::mock_helpers::MockedMigrations`] in benchmarks.
+		#[cfg(feature = "runtime-benchmarks")]
+		type Migrations: MockedMigrations;
+
+		/// The maximal length of an encoded cursor.
+		///
+		/// A good default needs to be selected such that no migration will ever have a cursor
+		/// with MEL above this limit. This is statically checked in `integrity_test`.
+		#[pallet::constant]
+		type CursorMaxLen: Get<u32>;
+
+		/// The maximal length of an encoded identifier.
+		///
+		/// A good default needs to be selected such that no migration will ever have an identifier
+		/// with MEL above this limit. This is statically checked in `integrity_test`.
+		#[pallet::constant]
+		type IdentifierMaxLen: Get<u32>;
+
+		/// Notifications for status updates of a runtime upgrade.
+		///
+		/// Could be used to pause XCM etc.
+		type MigrationStatusHandler: MigrationStatusHandler;
+
+		/// Handler for failed migrations.
+		type FailedMigrationHandler: FailedMigrationHandler;
+
+		/// The maximum weight to spend each block to execute migrations.
+		type MaxServiceWeight: Get<Weight>;
+
+		/// Weight information for the calls and functions of this pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	/// The currently active migration to run and its cursor.
+	///
+	/// `None` indicates that no migration is running.
+	#[pallet::storage]
+	pub type Cursor<T: Config> = StorageValue<_, CursorOf<T>, OptionQuery>;
+
+	/// Set of all successfully executed migrations.
+	///
+	/// This is used as a blacklist, to not re-execute migrations that have not been removed from
+	/// the codebase yet. Governance can regularly clear this out via `clear_historic`.
+	#[pallet::storage]
+	pub type Historic<T: Config> = StorageMap<_, Twox64Concat, IdentifierOf<T>, (), OptionQuery>;
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
+		/// A Runtime upgrade started.
+		///
+		/// Its end is indicated by `UpgradeCompleted` or `UpgradeFailed`.
+		UpgradeStarted {
+			/// The number of migrations that this upgrade contains.
+			///
+			/// This can be used to design a progress indicator in combination with counting the
+			/// `MigrationCompleted` and `MigrationSkipped` events.
+			migrations: u32,
+		},
+		/// The current runtime upgrade completed.
+		///
+		/// This implies that all of its migrations completed successfully as well.
+		UpgradeCompleted,
+		/// Runtime upgrade failed.
+		///
+		/// This is very bad and will require governance intervention.
+		UpgradeFailed,
+		/// A migration was skipped since it was already executed in the past.
+		MigrationSkipped {
+			/// The index of the skipped migration within the [`Config::Migrations`] list.
+			index: u32,
+		},
+		/// A migration progressed.
+		MigrationAdvanced {
+			/// The index of the migration within the [`Config::Migrations`] list.
+			index: u32,
+			/// The number of blocks that this migration took so far.
+			took: BlockNumberFor<T>,
+		},
+		/// A Migration completed.
+		MigrationCompleted {
+			/// The index of the migration within the [`Config::Migrations`] list.
+			index: u32,
+			/// The number of blocks that this migration took so far.
+			took: BlockNumberFor<T>,
+		},
+		/// A Migration failed.
+		///
+		/// This implies that the whole upgrade failed and governance intervention is required.
+		MigrationFailed {
+			/// The index of the migration within the [`Config::Migrations`] list.
+			index: u32,
+			/// The number of blocks that this migration took so far.
+			took: BlockNumberFor<T>,
+		},
+		/// The set of historical migrations has been cleared.
+		HistoricCleared {
+			/// Should be passed to `clear_historic` in a successive call.
+			next_cursor: Option<Vec<u8>>,
+		},
+	}
+
+	#[pallet::error]
+	pub enum Error<T> {
+		/// The operation cannot complete since some MBMs are ongoing.
+		Ongoing,
+	}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn on_runtime_upgrade() -> Weight {
+			Self::onboard_new_mbms()
+		}
+
+		#[cfg(feature = "std")]
+		fn integrity_test() {
+			// Check that the migrations tuple is legit.
+			frame_support::assert_ok!(T::Migrations::integrity_test());
+
+			// Very important! Ensure that the pallet is configured in `System::Config`.
+			{
+				assert!(!Cursor::<T>::exists(), "Externalities storage should be clean");
+				assert!(!<T as frame_system::Config>::MultiBlockMigrator::ongoing());
+
+				Cursor::<T>::put(MigrationCursor::Stuck);
+				assert!(<T as frame_system::Config>::MultiBlockMigrator::ongoing());
+
+				Cursor::<T>::kill();
+			}
+
+			// The per-block service weight is sane.
+			#[cfg(not(test))]
+			{
+				let want = T::MaxServiceWeight::get();
+				let max = <T as frame_system::Config>::BlockWeights::get().max_block;
+
+				assert!(want.all_lte(max), "Service weight is larger than a block: {want} > {max}",);
+			}
+
+			// Cursor MEL
+			{
+				let mel = T::Migrations::cursor_max_encoded_len();
+				let max_mel = T::CursorMaxLen::get() as usize;
+				assert!(
+					mel <= max_mel,
+					"A Cursor is not guaranteed to fit into the storage: {mel} > {max_mel}",
+				);
+			}
+
+			// Identifier MEL
+			{
+				let mel = T::Migrations::identifier_max_encoded_len();
+				let max_mel = T::IdentifierMaxLen::get() as usize;
+				assert!(
+					mel <= max_mel,
+					"An Identifier is not guaranteed to fit into the storage: {mel} > {max_mel}",
+				);
+			}
+		}
+	}
+
+	#[pallet::call(weight = T::WeightInfo)]
+	impl<T: Config> Pallet<T> {
+		/// Allows root to set a cursor to forcefully start, stop or forward the migration process.
+		///
+		/// Should normally not be needed and is only in place as an emergency measure. Note that
+		/// restarting the migration process in this manner will not call the
+		/// [`MigrationStatusHandler::started`] hook or emit an `UpgradeStarted` event.
+		#[pallet::call_index(0)]
+		pub fn force_set_cursor(
+			origin: OriginFor<T>,
+			cursor: Option<CursorOf<T>>,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+
+			Cursor::<T>::set(cursor);
+
+			Ok(())
+		}
+
+		/// Allows root to set an active cursor to forcefully start/forward the migration process.
+		///
+		/// This is an edge-case version of [`Self::force_set_cursor`] that allows setting the
+		/// `started_at` value to the next block number. Otherwise this would not be possible, since
+		/// `force_set_cursor` takes an absolute block number. Setting `started_at` to `None`
+		/// indicates that the current block number plus one should be used.
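+		///
+		/// An illustrative invocation (the `RuntimeOrigin` name is runtime-specific and the
+		/// values are assumptions of the example): restart the process at migration `0`, with a
+		/// fresh inner cursor and `started_at` defaulting to the next block:
+		/// ```ignore
+		/// Migrations::force_set_active_cursor(RuntimeOrigin::root(), 0, None, None)?;
+		/// ```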
+		#[pallet::call_index(1)]
+		pub fn force_set_active_cursor(
+			origin: OriginFor<T>,
+			index: u32,
+			inner_cursor: Option<RawCursorOf<T>>,
+			started_at: Option<BlockNumberFor<T>>,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+
+			let started_at = started_at.unwrap_or(
+				System::<T>::block_number().saturating_add(sp_runtime::traits::One::one()),
+			);
+			Cursor::<T>::put(MigrationCursor::Active(ActiveCursor {
+				index,
+				inner_cursor,
+				started_at,
+			}));
+
+			Ok(())
+		}
+
+		/// Forces the onboarding of the migrations.
+		///
+		/// This process happens automatically on a runtime upgrade. It is in place as an emergency
+		/// measure. The cursor needs to be `None` for this to succeed.
+		#[pallet::call_index(2)]
+		pub fn force_onboard_mbms(origin: OriginFor<T>) -> DispatchResult {
+			ensure_root(origin)?;
+
+			ensure!(!Cursor::<T>::exists(), Error::<T>::Ongoing);
+			Self::onboard_new_mbms();
+
+			Ok(())
+		}
+
+		/// Clears the `Historic` set.
+		///
+		/// The `previous_cursor` of a `Wildcard` selector must be set to the last value that was
+		/// returned by the `HistoricCleared` event. The first time `None` can be used. `limit`
+		/// must be chosen in a way that will result in a sensible weight.
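+		///
+		/// An illustrative batch-clearing sketch (the origin and values are assumptions of the
+		/// example):
+		/// ```ignore
+		/// Migrations::clear_historic(
+		/// 	RuntimeOrigin::root(),
+		/// 	HistoricCleanupSelector::Wildcard { limit: None, previous_cursor: None },
+		/// )?;
+		/// // Pass the `next_cursor` of the emitted `HistoricCleared` event into the next call.
+		/// ```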
+		#[pallet::call_index(3)]
+		#[pallet::weight(T::WeightInfo::clear_historic(selector.limit()))]
+		pub fn clear_historic(
+			origin: OriginFor<T>,
+			selector: HistoricCleanupSelector<IdentifierOf<T>>,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+
+			match &selector {
+				HistoricCleanupSelector::Specific(ids) => {
+					for id in ids {
+						Historic::<T>::remove(id);
+					}
+					Self::deposit_event(Event::HistoricCleared { next_cursor: None });
+				},
+				HistoricCleanupSelector::Wildcard { previous_cursor, .. } => {
+					let next = Historic::<T>::clear(selector.limit(), previous_cursor.as_deref());
+					Self::deposit_event(Event::HistoricCleared { next_cursor: next.maybe_cursor });
+				},
+			}
+
+			Ok(())
+		}
+	}
+}
+
+impl<T: Config> Pallet<T> {
+	/// Onboard all new Multi-Block-Migrations and start the process of executing them.
+	///
+	/// Should only be called once all previous migrations completed.
+	fn onboard_new_mbms() -> Weight {
+		if let Some(cursor) = Cursor::<T>::get() {
+			log::error!("Ongoing migrations interrupted - chain stuck");
+
+			let maybe_index = cursor.as_active().map(|c| c.index);
+			Self::upgrade_failed(maybe_index);
+			return T::WeightInfo::onboard_new_mbms()
+		}
+
+		let migrations = T::Migrations::len();
+		log::debug!("Onboarding {migrations} new MBM migrations");
+
+		if migrations > 0 {
+			// Set the cursor to the first migration:
+			Cursor::<T>::set(Some(
+				ActiveCursor {
+					index: 0,
+					inner_cursor: None,
+					started_at: System::<T>::block_number(),
+				}
+				.into(),
+			));
+			Self::deposit_event(Event::UpgradeStarted { migrations });
+			T::MigrationStatusHandler::started();
+		}
+
+		T::WeightInfo::onboard_new_mbms()
+	}
+
+	/// Tries to make progress on the Multi-Block-Migrations process.
+	fn progress_mbms(n: BlockNumberFor<T>) -> Weight {
+		let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get());
+		meter.consume(T::WeightInfo::progress_mbms_none());
+
+		let mut cursor = match Cursor::<T>::get() {
+			None => {
+				log::trace!("[Block {n:?}] Waiting for cursor to become `Some`.");
+				return meter.consumed()
+			},
+			Some(MigrationCursor::Active(cursor)) => {
+				log::debug!("Progressing MBM #{}", cursor.index);
+				cursor
+			},
+			Some(MigrationCursor::Stuck) => {
+				log::error!("Migration stuck. Governance intervention required.");
+				return meter.consumed()
+			},
+		};
+		debug_assert!(Self::ongoing());
+
+		// The limit here is a defensive measure to prevent an infinite loop. It expresses that we
+		// allow no more than 8 MBMs to finish in a single block. This should be harmless, since we
+		// generally expect *Multi*-Block-Migrations to take *multiple* blocks.
+		for i in 0..8 {
+			match Self::exec_migration(cursor, i == 0, &mut meter) {
+				None => return meter.consumed(),
+				Some(ControlFlow::Continue(next_cursor)) => {
+					cursor = next_cursor;
+				},
+				Some(ControlFlow::Break(last_cursor)) => {
+					cursor = last_cursor;
+					break
+				},
+			}
+		}
+
+		Cursor::<T>::set(Some(cursor.into()));
+
+		meter.consumed()
+	}
+
+	/// Try to make progress on the current migration.
+	///
+	/// Returns whether processing should continue or break for this block. The return value means:
+	/// - `None`: The migration process is completely finished.
+	/// - `ControlFlow::Break`: Continue in the *next* block with the given cursor.
+	/// - `ControlFlow::Continue`: Continue in the *current* block with the given cursor.
+	fn exec_migration(
+		mut cursor: ActiveCursorOf<T>,
+		is_first: bool,
+		meter: &mut WeightMeter,
+	) -> Option<ControlFlow<ActiveCursorOf<T>, ActiveCursorOf<T>>> {
+		// The differences between the single branches' weights is not that big. And since we do
+		// only one step per block, we can just use the maximum instead of more precise accounting.
+		if meter.try_consume(Self::exec_migration_max_weight()).is_err() {
+			defensive_assert!(!is_first, "There should be enough weight to do this at least once");
+			return Some(ControlFlow::Break(cursor))
+		}
+
+		let Some(id) = T::Migrations::nth_id(cursor.index) else {
+			// No more migrations in the tuple - we are done.
+			defensive_assert!(cursor.index == T::Migrations::len(), "Inconsistent MBMs tuple");
+			Self::deposit_event(Event::UpgradeCompleted);
+			Cursor::<T>::kill();
+			T::MigrationStatusHandler::completed();
+			return None;
+		};
+
+		let Ok(bounded_id): Result<IdentifierOf<T>, _> = id.try_into() else {
+			defensive!("integrity_test ensures that all identifiers' MEL bounds fit into CursorMaxLen; qed.");
+			Self::upgrade_failed(Some(cursor.index));
+			return None
+		};
+
+		if Historic::<T>::contains_key(&bounded_id) {
+			Self::deposit_event(Event::MigrationSkipped { index: cursor.index });
+			cursor.goto_next_migration(System::<T>::block_number());
+			return Some(ControlFlow::Continue(cursor))
+		}
+
+		let max_steps = T::Migrations::nth_max_steps(cursor.index);
+		let next_cursor = T::Migrations::nth_transactional_step(
+			cursor.index,
+			cursor.inner_cursor.clone().map(|c| c.into_inner()),
+			meter,
+		);
+		let Some((max_steps, next_cursor)) = max_steps.zip(next_cursor) else {
+			defensive!("integrity_test ensures that the tuple is valid; qed");
+			Self::upgrade_failed(Some(cursor.index));
+			return None
+		};
+
+		let took = System::<T>::block_number().saturating_sub(cursor.started_at);
+		match next_cursor {
+			Ok(Some(next_cursor)) => {
+				let Ok(bound_next_cursor) = next_cursor.try_into() else {
+					defensive!("The integrity check ensures that all cursors' MEL bound fits into CursorMaxLen; qed");
+					Self::upgrade_failed(Some(cursor.index));
+					return None
+				};
+
+				Self::deposit_event(Event::MigrationAdvanced { index: cursor.index, took });
+				cursor.inner_cursor = Some(bound_next_cursor);
+
+				if max_steps.map_or(false, |max| took > max.into()) {
+					Self::deposit_event(Event::MigrationFailed { index: cursor.index, took });
+					Self::upgrade_failed(Some(cursor.index));
+					None
+				} else {
+					// A migration cannot progress more than one step per block, we therefore break.
+					Some(ControlFlow::Break(cursor))
+				}
+			},
+			Ok(None) => {
+				// A migration is done when it returns cursor `None`.
+				Self::deposit_event(Event::MigrationCompleted { index: cursor.index, took });
+				Historic::<T>::insert(&bounded_id, ());
+				cursor.goto_next_migration(System::<T>::block_number());
+				Some(ControlFlow::Continue(cursor))
+			},
+			Err(SteppedMigrationError::InsufficientWeight { required }) => {
+				if is_first || required.any_gt(meter.limit()) {
+					Self::deposit_event(Event::MigrationFailed { index: cursor.index, took });
+					Self::upgrade_failed(Some(cursor.index));
+					None
+				} else {
+					// Retry and hope that there is more weight in the next block.
+					Some(ControlFlow::Break(cursor))
+				}
+			},
+			Err(SteppedMigrationError::InvalidCursor | SteppedMigrationError::Failed) => {
+				Self::deposit_event(Event::MigrationFailed { index: cursor.index, took });
+				Self::upgrade_failed(Some(cursor.index));
+				None
+			},
+		}
+	}
+
+	/// Fail the current runtime upgrade, caused by `migration`.
+	fn upgrade_failed(migration: Option<u32>) {
+		use FailedMigrationHandling::*;
+		Self::deposit_event(Event::UpgradeFailed);
+
+		match T::FailedMigrationHandler::failed(migration) {
+			KeepStuck => Cursor::<T>::set(Some(MigrationCursor::Stuck)),
+			ForceUnstuck => Cursor::<T>::kill(),
+			Ignore => {},
+		}
+	}
+
+	fn exec_migration_max_weight() -> Weight {
+		T::WeightInfo::exec_migration_complete()
+			.max(T::WeightInfo::exec_migration_completed())
+			.max(T::WeightInfo::exec_migration_skipped_historic())
+			.max(T::WeightInfo::exec_migration_advance())
+			.max(T::WeightInfo::exec_migration_fail())
+	}
+}
+
+impl<T: Config> MultiStepMigrator for Pallet<T> {
+	fn ongoing() -> bool {
+		Cursor::<T>::exists()
+	}
+
+	fn step() -> Weight {
+		Self::progress_mbms(System::<T>::block_number())
+	}
+}
diff --git a/substrate/frame/migrations/src/mock.rs b/substrate/frame/migrations/src/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3729264755442a008641668b0a14214764d255b9
--- /dev/null
+++ b/substrate/frame/migrations/src/mock.rs
@@ -0,0 +1,163 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Mocked runtime for testing the migrations pallet.
+
+#![cfg(test)]
+
+use crate::{mock_helpers::*, Event, Historic};
+
+use frame_support::{
+	derive_impl,
+	migrations::*,
+	traits::{OnFinalize, OnInitialize},
+	weights::Weight,
+};
+use frame_system::EventRecord;
+use sp_core::{ConstU32, H256};
+
+type Block = frame_system::mocking::MockBlock<Test>;
+
+// Configure a mock runtime to test the pallet.
+frame_support::construct_runtime!(
+	pub enum Test {
+		System: frame_system,
+		Migrations: crate,
+	}
+);
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+impl frame_system::Config for Test {
+	type Block = Block;
+	type PalletInfo = PalletInfo;
+	type MultiBlockMigrator = Migrations;
+}
+
+frame_support::parameter_types! {
+	pub const MaxServiceWeight: Weight = Weight::MAX.div(10);
+}
+
+impl crate::Config for Test {
+	type RuntimeEvent = RuntimeEvent;
+	type Migrations = MockedMigrations;
+	type CursorMaxLen = ConstU32<65_536>;
+	type IdentifierMaxLen = ConstU32<256>;
+	type MigrationStatusHandler = MockedMigrationStatusHandler;
+	type FailedMigrationHandler = MockedFailedMigrationHandler;
+	type MaxServiceWeight = MaxServiceWeight;
+	type WeightInfo = ();
+}
+
+frame_support::parameter_types! {
+	/// The number of started upgrades.
+	pub static UpgradesStarted: u32 = 0;
+	/// The number of completed upgrades.
+	pub static UpgradesCompleted: u32 = 0;
+	/// The migrations that failed.
+	pub static UpgradesFailed: Vec<Option<u32>> = vec![];
+	/// Return value of [`MockedFailedMigrationHandler::failed`].
+	pub static FailedUpgradeResponse: FailedMigrationHandling = FailedMigrationHandling::KeepStuck;
+}
+
+/// Records all started and completed upgrades in `UpgradesStarted` and `UpgradesCompleted`.
+pub struct MockedMigrationStatusHandler;
+impl MigrationStatusHandler for MockedMigrationStatusHandler {
+	fn started() {
+		log::info!("MigrationStatusHandler started");
+		UpgradesStarted::mutate(|v| *v += 1);
+	}
+
+	fn completed() {
+		log::info!("MigrationStatusHandler completed");
+		UpgradesCompleted::mutate(|v| *v += 1);
+	}
+}
+
+/// Records all failed upgrades in `UpgradesFailed`.
+pub struct MockedFailedMigrationHandler;
+impl FailedMigrationHandler for MockedFailedMigrationHandler {
+	fn failed(migration: Option<u32>) -> FailedMigrationHandling {
+		UpgradesFailed::mutate(|v| v.push(migration));
+		let res = FailedUpgradeResponse::get();
+		log::error!("FailedMigrationHandler failed at: {migration:?}, handling as {res:?}");
+		res
+	}
+}
+
+/// Returns the number of `(started, completed, failed)` upgrades and resets their numbers.
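+///
+/// E.g. `assert_eq!(upgrades_started_completed_failed(), (1, 1, 0));` after one upgrade that
+/// completed cleanly (illustrative).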
+pub fn upgrades_started_completed_failed() -> (u32, u32, u32) {
+	(UpgradesStarted::take(), UpgradesCompleted::take(), UpgradesFailed::take().len() as u32)
+}
+
+/// Build genesis storage according to the mock runtime.
+pub fn new_test_ext() -> sp_io::TestExternalities {
+	sp_io::TestExternalities::new(Default::default())
+}
+
+/// Run this closure in test externalities.
+pub fn test_closure<R>(f: impl FnOnce() -> R) -> R {
+	let mut ext = new_test_ext();
+	ext.execute_with(f)
+}
+
+pub fn run_to_block(n: u32) {
+	while System::block_number() < n as u64 {
+		log::debug!("Block {}", System::block_number());
+		System::set_block_number(System::block_number() + 1);
+		System::on_initialize(System::block_number());
+		Migrations::on_initialize(System::block_number());
+		// Executive calls this:
+		<Migrations as MultiStepMigrator>::step();
+
+		Migrations::on_finalize(System::block_number());
+		System::on_finalize(System::block_number());
+	}
+}
+
+/// Returns the historic migrations, sorted by their identifier.
+pub fn historic() -> Vec<MockedIdentifier> {
+	let mut historic = Historic::<Test>::iter_keys().collect::<Vec<_>>();
+	historic.sort();
+	historic
+}
+
+// Traits to make using events less insufferable:
+pub trait IntoRecord {
+	fn into_record(self) -> EventRecord<<Test as frame_system::Config>::RuntimeEvent, H256>;
+}
+
+impl IntoRecord for Event<Test> {
+	fn into_record(self) -> EventRecord<<Test as frame_system::Config>::RuntimeEvent, H256> {
+		let re: <Test as frame_system::Config>::RuntimeEvent = self.into();
+		EventRecord { phase: frame_system::Phase::Initialization, event: re, topics: vec![] }
+	}
+}
+
+pub trait IntoRecords {
+	fn into_records(self) -> Vec<EventRecord<<Test as frame_system::Config>::RuntimeEvent, H256>>;
+}
+
+impl<E: IntoRecord> IntoRecords for Vec<E> {
+	fn into_records(self) -> Vec<EventRecord<<Test as frame_system::Config>::RuntimeEvent, H256>> {
+		self.into_iter().map(|e| e.into_record()).collect()
+	}
+}
+
+pub fn assert_events<E: IntoRecord>(events: Vec<E>) {
+	pretty_assertions::assert_eq!(events.into_records(), System::events());
+	System::reset_events();
+}
diff --git a/substrate/frame/migrations/src/mock_helpers.rs b/substrate/frame/migrations/src/mock_helpers.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c5e23efb4e314e6f5562c50521a533c82f94669e
--- /dev/null
+++ b/substrate/frame/migrations/src/mock_helpers.rs
@@ -0,0 +1,142 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Test helpers for internal and external usage.
+
+#![allow(missing_docs)]
+
+use codec::{Decode, Encode};
+use frame_support::{
+	migrations::*,
+	weights::{Weight, WeightMeter},
+};
+use sp_core::ConstU32;
+use sp_runtime::BoundedVec;
+use sp_std::{vec, vec::Vec};
+
+/// Opaque identifier of a migration.
+pub type MockedIdentifier = BoundedVec<u8, ConstU32<256>>;
+
+/// How a mocked migration should behave.
+#[derive(Debug, Clone, Copy, Encode, Decode)]
+pub enum MockedMigrationKind {
+	/// Succeed after its number of steps elapsed.
+	SucceedAfter,
+	/// Fail after its number of steps elapsed.
+	FailAfter,
+	/// Never terminate.
+	TimeoutAfter,
+	/// Cause an [`SteppedMigrationError::InsufficientWeight`] error after its number of steps
+	/// elapsed.
+	HighWeightAfter(Weight),
+}
+use MockedMigrationKind::*; // C style
+
+/// Creates a migration identifier with a specific `kind` and `steps`.
+pub fn mocked_id(kind: MockedMigrationKind, steps: u32) -> MockedIdentifier {
+	(b"MockedMigration", kind, steps).encode().try_into().unwrap()
+}
+
+frame_support::parameter_types! {
+	/// The configs for the migrations to run.
+	storage MIGRATIONS: Vec<(MockedMigrationKind, u32)> = vec![];
+}
+
+/// Allows setting the migrations to run at runtime, instead of at compile-time.
+///
+/// It achieves this by using the storage to store the migrations to run.
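+///
+/// For example (a sketch; `SucceedAfter` is a [`MockedMigrationKind`]):
+/// ```ignore
+/// MockedMigrations::set(vec![(SucceedAfter, 2)]);
+/// assert_eq!(<MockedMigrations as SteppedMigrations>::len(), 1);
+/// ```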
+pub struct MockedMigrations;
+impl SteppedMigrations for MockedMigrations {
+	fn len() -> u32 {
+		MIGRATIONS::get().len() as u32
+	}
+
+	fn nth_id(n: u32) -> Option<Vec<u8>> {
+		let k = MIGRATIONS::get().get(n as usize).copied();
+		k.map(|(kind, steps)| mocked_id(kind, steps).into_inner())
+	}
+
+	fn nth_step(
+		n: u32,
+		cursor: Option<Vec<u8>>,
+		_meter: &mut WeightMeter,
+	) -> Option<Result<Option<Vec<u8>>, SteppedMigrationError>> {
+		let (kind, steps) = MIGRATIONS::get()[n as usize];
+
+		let mut count: u32 =
+			cursor.as_ref().and_then(|c| Decode::decode(&mut &c[..]).ok()).unwrap_or(0);
+		log::debug!("MockedMigration: Step {}", count);
+		if count != steps || matches!(kind, TimeoutAfter) {
+			count += 1;
+			return Some(Ok(Some(count.encode())))
+		}
+
+		Some(match kind {
+			SucceedAfter => {
+				log::debug!("MockedMigration: Succeeded after {} steps", count);
+				Ok(None)
+			},
+			HighWeightAfter(required) => {
+				log::debug!("MockedMigration: Not enough weight after {} steps", count);
+				Err(SteppedMigrationError::InsufficientWeight { required })
+			},
+			FailAfter => {
+				log::debug!("MockedMigration: Failed after {} steps", count);
+				Err(SteppedMigrationError::Failed)
+			},
+			TimeoutAfter => unreachable!(),
+		})
+	}
+
+	fn nth_transactional_step(
+		n: u32,
+		cursor: Option<Vec<u8>>,
+		meter: &mut WeightMeter,
+	) -> Option<Result<Option<Vec<u8>>, SteppedMigrationError>> {
+		// This is a hack but should be fine. We don't need it in testing.
+		Self::nth_step(n, cursor, meter)
+	}
+
+	fn nth_max_steps(n: u32) -> Option<Option<u32>> {
+		MIGRATIONS::get().get(n as usize).map(|(_, s)| Some(*s))
+	}
+
+	fn cursor_max_encoded_len() -> usize {
+		65_536
+	}
+
+	fn identifier_max_encoded_len() -> usize {
+		256
+	}
+}
+
+impl MockedMigrations {
+	/// Set the migrations to run.
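+	///
+	/// E.g. `MockedMigrations::set(vec![(FailAfter, 1)]);` for a single migration that fails
+	/// after one step (illustrative).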
+	pub fn set(migrations: Vec<(MockedMigrationKind, u32)>) {
+		MIGRATIONS::set(&migrations);
+	}
+}
+
+impl crate::MockedMigrations for MockedMigrations {
+	fn set_fail_after(steps: u32) {
+		MIGRATIONS::set(&vec![(FailAfter, steps)]);
+	}
+
+	fn set_success_after(steps: u32) {
+		MIGRATIONS::set(&vec![(SucceedAfter, steps)]);
+	}
+}
diff --git a/substrate/frame/migrations/src/tests.rs b/substrate/frame/migrations/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9c9043d37a62e5f1df3d12da979b68251621c9d0
--- /dev/null
+++ b/substrate/frame/migrations/src/tests.rs
@@ -0,0 +1,335 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![cfg(test)]
+
+use crate::{
+	mock::{Test as T, *},
+	mock_helpers::{MockedMigrationKind::*, *},
+	Cursor, Event, FailedMigrationHandling, MigrationCursor,
+};
+use frame_support::{pallet_prelude::Weight, traits::OnRuntimeUpgrade};
+
+#[docify::export]
+#[test]
+fn simple_works() {
+	use Event::*;
+	test_closure(|| {
+		// Add three migrations, each taking one block longer than the previous.
+		MockedMigrations::set(vec![(SucceedAfter, 0), (SucceedAfter, 1), (SucceedAfter, 2)]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(10);
+
+		// Check that the executed migrations are recorded in `Historical`.
+		assert_eq!(
+			historic(),
+			vec![
+				mocked_id(SucceedAfter, 0),
+				mocked_id(SucceedAfter, 1),
+				mocked_id(SucceedAfter, 2),
+			]
+		);
+
+		// Check that we got all events.
+		assert_events(vec![
+			UpgradeStarted { migrations: 3 },
+			MigrationCompleted { index: 0, took: 1 },
+			MigrationAdvanced { index: 1, took: 0 },
+			MigrationCompleted { index: 1, took: 1 },
+			MigrationAdvanced { index: 2, took: 0 },
+			MigrationAdvanced { index: 2, took: 1 },
+			MigrationCompleted { index: 2, took: 2 },
+			UpgradeCompleted,
+		]);
+	});
+}
+
+#[test]
+fn failing_migration_sets_cursor_to_stuck() {
+	test_closure(|| {
+		FailedUpgradeResponse::set(FailedMigrationHandling::KeepStuck);
+		MockedMigrations::set(vec![(FailAfter, 2)]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(10);
+
+		// Failed migrations are not recorded in `Historical`.
+		assert!(historic().is_empty());
+		// Check that we got all events.
+		assert_events(vec![
+			Event::UpgradeStarted { migrations: 1 },
+			Event::MigrationAdvanced { index: 0, took: 1 },
+			Event::MigrationAdvanced { index: 0, took: 2 },
+			Event::MigrationFailed { index: 0, took: 3 },
+			Event::UpgradeFailed,
+		]);
+
+		// Check that the handler was called correctly.
+		assert_eq!(UpgradesStarted::take(), 1);
+		assert_eq!(UpgradesCompleted::take(), 0);
+		assert_eq!(UpgradesFailed::take(), vec![Some(0)]);
+
+		assert_eq!(Cursor::<T>::get(), Some(MigrationCursor::Stuck), "Chain must be stuck");
+	});
+}
+
+#[test]
+fn failing_migration_force_unstuck_works() {
+	test_closure(|| {
+		FailedUpgradeResponse::set(FailedMigrationHandling::ForceUnstuck);
+		MockedMigrations::set(vec![(FailAfter, 2)]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(10);
+
+		// Failed migrations are not recorded in `Historical`.
+		assert!(historic().is_empty());
+		// Check that we got all events.
+		assert_events(vec![
+			Event::UpgradeStarted { migrations: 1 },
+			Event::MigrationAdvanced { index: 0, took: 1 },
+			Event::MigrationAdvanced { index: 0, took: 2 },
+			Event::MigrationFailed { index: 0, took: 3 },
+			Event::UpgradeFailed,
+		]);
+
+		// Check that the handler was called correctly.
+		assert_eq!(UpgradesStarted::take(), 1);
+		assert_eq!(UpgradesCompleted::take(), 0);
+		assert_eq!(UpgradesFailed::take(), vec![Some(0)]);
+
+		assert!(Cursor::<T>::get().is_none(), "Must unstuck the chain");
+	});
+}
+
+/// A migration that reports not getting enough weight errors out if it is the first one to run in
+/// that block.
+#[test]
+fn high_weight_migration_singular_fails() {
+	test_closure(|| {
+		MockedMigrations::set(vec![(HighWeightAfter(Weight::zero()), 2)]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(10);
+
+		// Failed migrations are not recorded in `Historical`.
+		assert!(historic().is_empty());
+		// Check that we got all events.
+		assert_events(vec![
+			Event::UpgradeStarted { migrations: 1 },
+			Event::MigrationAdvanced { index: 0, took: 1 },
+			Event::MigrationAdvanced { index: 0, took: 2 },
+			Event::MigrationFailed { index: 0, took: 3 },
+			Event::UpgradeFailed,
+		]);
+
+		// Check that the handler was called correctly.
+		assert_eq!(upgrades_started_completed_failed(), (1, 0, 1));
+		assert_eq!(Cursor::<T>::get(), Some(MigrationCursor::Stuck));
+	});
+}
+
+/// A migration that reports not getting enough weight is retried once, if it is not the first
+/// one to run in a block.
+#[test]
+fn high_weight_migration_retries_once() {
+	test_closure(|| {
+		MockedMigrations::set(vec![(SucceedAfter, 0), (HighWeightAfter(Weight::zero()), 0)]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(10);
+
+		assert_eq!(historic(), vec![mocked_id(SucceedAfter, 0)]);
+		// Check that we got all events.
+		assert_events::<Event<T>>(vec![
+			Event::UpgradeStarted { migrations: 2 },
+			Event::MigrationCompleted { index: 0, took: 1 },
+			// `took=1` means that it was retried once.
+			Event::MigrationFailed { index: 1, took: 1 },
+			Event::UpgradeFailed,
+		]);
+
+		// Check that the handler was called correctly.
+		assert_eq!(upgrades_started_completed_failed(), (1, 0, 1));
+		assert_eq!(Cursor::<T>::get(), Some(MigrationCursor::Stuck));
+	});
+}
+
+/// If a migration uses more weight than the limit, then it will not retry but fail even when it is
+/// not the first one in the block.
+// Note: Same as `high_weight_migration_retries_once` but with different required weight for the
+// migration.
+#[test]
+fn high_weight_migration_permanently_overweight_fails() {
+	test_closure(|| {
+		MockedMigrations::set(vec![(SucceedAfter, 0), (HighWeightAfter(Weight::MAX), 0)]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(10);
+
+		assert_eq!(historic(), vec![mocked_id(SucceedAfter, 0)]);
+		// Check that we got all events.
+		assert_events::<Event<T>>(vec![
+			Event::UpgradeStarted { migrations: 2 },
+			Event::MigrationCompleted { index: 0, took: 1 },
+			// `took=0` means that it was not retried.
+			Event::MigrationFailed { index: 1, took: 0 },
+			Event::UpgradeFailed,
+		]);
+
+		// Check that the handler was called correctly.
+		assert_eq!(upgrades_started_completed_failed(), (1, 0, 1));
+		assert_eq!(Cursor::<T>::get(), Some(MigrationCursor::Stuck));
+	});
+}
+
+#[test]
+fn historic_skipping_works() {
+	test_closure(|| {
+		MockedMigrations::set(vec![
+			(SucceedAfter, 0),
+			(SucceedAfter, 0), // duplicate
+			(SucceedAfter, 1),
+			(SucceedAfter, 2),
+			(SucceedAfter, 1), // duplicate
+		]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(10);
+
+		// Just three historical ones, since two were added twice.
+		assert_eq!(
+			historic(),
+			vec![
+				mocked_id(SucceedAfter, 0),
+				mocked_id(SucceedAfter, 1),
+				mocked_id(SucceedAfter, 2),
+			]
+		);
+		// Events received.
+		assert_events(vec![
+			Event::UpgradeStarted { migrations: 5 },
+			Event::MigrationCompleted { index: 0, took: 1 },
+			Event::MigrationSkipped { index: 1 },
+			Event::MigrationAdvanced { index: 2, took: 0 },
+			Event::MigrationCompleted { index: 2, took: 1 },
+			Event::MigrationAdvanced { index: 3, took: 0 },
+			Event::MigrationAdvanced { index: 3, took: 1 },
+			Event::MigrationCompleted { index: 3, took: 2 },
+			Event::MigrationSkipped { index: 4 },
+			Event::UpgradeCompleted,
+		]);
+		assert_eq!(upgrades_started_completed_failed(), (1, 1, 0));
+
+		// Now go for another upgrade; just to make sure that it won't execute again.
+		System::reset_events();
+		Migrations::on_runtime_upgrade();
+		run_to_block(20);
+
+		// Same historical ones as before.
+		assert_eq!(
+			historic(),
+			vec![
+				mocked_id(SucceedAfter, 0),
+				mocked_id(SucceedAfter, 1),
+				mocked_id(SucceedAfter, 2),
+			]
+		);
+
+		// Everything got skipped.
+		assert_events(vec![
+			Event::UpgradeStarted { migrations: 5 },
+			Event::MigrationSkipped { index: 0 },
+			Event::MigrationSkipped { index: 1 },
+			Event::MigrationSkipped { index: 2 },
+			Event::MigrationSkipped { index: 3 },
+			Event::MigrationSkipped { index: 4 },
+			Event::UpgradeCompleted,
+		]);
+		assert_eq!(upgrades_started_completed_failed(), (1, 1, 0));
+	});
+}
+
+/// When another upgrade happens while a migration is still running, it should set the cursor to
+/// stuck.
+#[test]
+fn upgrade_fails_when_migration_active() {
+	test_closure(|| {
+		MockedMigrations::set(vec![(SucceedAfter, 10)]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(3);
+
+		// Events received.
+		assert_events(vec![
+			Event::UpgradeStarted { migrations: 1 },
+			Event::MigrationAdvanced { index: 0, took: 1 },
+			Event::MigrationAdvanced { index: 0, took: 2 },
+		]);
+		assert_eq!(upgrades_started_completed_failed(), (1, 0, 0));
+
+		// Upgrade again.
+		Migrations::on_runtime_upgrade();
+		// -- Defensive path --
+		assert_eq!(Cursor::<T>::get(), Some(MigrationCursor::Stuck));
+		assert_events(vec![Event::UpgradeFailed]);
+		assert_eq!(upgrades_started_completed_failed(), (0, 0, 1));
+	});
+}
+
+#[test]
+fn migration_timeout_errors() {
+	test_closure(|| {
+		MockedMigrations::set(vec![(TimeoutAfter, 3)]);
+
+		System::set_block_number(1);
+		Migrations::on_runtime_upgrade();
+		run_to_block(5);
+
+		// Times out after taking more than 3 steps.
+		assert_events(vec![
+			Event::UpgradeStarted { migrations: 1 },
+			Event::MigrationAdvanced { index: 0, took: 1 },
+			Event::MigrationAdvanced { index: 0, took: 2 },
+			Event::MigrationAdvanced { index: 0, took: 3 },
+			Event::MigrationAdvanced { index: 0, took: 4 },
+			Event::MigrationFailed { index: 0, took: 4 },
+			Event::UpgradeFailed,
+		]);
+		assert_eq!(upgrades_started_completed_failed(), (1, 0, 1));
+
+		// Failed migrations are not black-listed.
+		assert!(historic().is_empty());
+		assert_eq!(Cursor::<T>::get(), Some(MigrationCursor::Stuck));
+
+		Migrations::on_runtime_upgrade();
+		run_to_block(6);
+
+		assert_events(vec![Event::UpgradeFailed]);
+		assert_eq!(Cursor::<T>::get(), Some(MigrationCursor::Stuck));
+		assert_eq!(upgrades_started_completed_failed(), (0, 0, 1));
+	});
+}
diff --git a/substrate/frame/migrations/src/weights.rs b/substrate/frame/migrations/src/weights.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c9b63258c44b7948f113161be6452b9a0f7348cc
--- /dev/null
+++ b/substrate/frame/migrations/src/weights.rs
@@ -0,0 +1,358 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_migrations`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-12-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `loud1`, CPU: `AMD EPYC 7282 16-Core Processor`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet-migrations
+// --extrinsic
+//
+// --output
+// weight.rs
+// --template
+// ../../polkadot-sdk/substrate/.maintain/frame-weight-template.hbs

+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_migrations`.
+pub trait WeightInfo {
+	fn onboard_new_mbms() -> Weight;
+	fn progress_mbms_none() -> Weight;
+	fn exec_migration_completed() -> Weight;
+	fn exec_migration_skipped_historic() -> Weight;
+	fn exec_migration_advance() -> Weight;
+	fn exec_migration_complete() -> Weight;
+	fn exec_migration_fail() -> Weight;
+	fn on_init_loop() -> Weight;
+	fn force_set_cursor() -> Weight;
+	fn force_set_active_cursor() -> Weight;
+	fn force_onboard_mbms() -> Weight;
+	fn clear_historic(n: u32, ) -> Weight;
+}
+
+/// Weights for `pallet_migrations` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1)
+	/// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`)
+	/// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0)
+	/// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0)
+	fn onboard_new_mbms() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `243`
+		// Estimated: `67035`
+		// Minimum execution time: 13_980_000 picoseconds.
+		Weight::from_parts(14_290_000, 67035)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0)
+	/// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`)
+	fn progress_mbms_none() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `109`
+		// Estimated: `67035`
+		// Minimum execution time: 3_770_000 picoseconds.
+		Weight::from_parts(4_001_000, 67035)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+	}
+	/// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0)
+	/// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0)
+	/// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1)
+	/// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`)
+	fn exec_migration_completed() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `134`
+		// Estimated: `3599`
+		// Minimum execution time: 10_900_000 picoseconds.
+ Weight::from_parts(11_251_000, 3599) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3762` + // Minimum execution time: 17_891_000 picoseconds. + Weight::from_parts(18_501_000, 3762) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 18_271_000 picoseconds. + Weight::from_parts(18_740_000, 3731) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 21_241_000 picoseconds. + Weight::from_parts(21_911_000, 3731) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 22_740_000 picoseconds. + Weight::from_parts(23_231_000, 3731) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 440_000 picoseconds. + Weight::from_parts(500_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_751_000 picoseconds. 
+ Weight::from_parts(5_950_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_350_000 picoseconds. + Weight::from_parts(6_560_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `67035` + // Minimum execution time: 11_121_000 picoseconds. + Weight::from_parts(11_530_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. + fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1089 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 21_891_000 picoseconds. + Weight::from_parts(18_572_306, 3834) + // Standard Error: 3_236 + .saturating_add(Weight::from_parts(1_648_429, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `67035` + // Minimum execution time: 13_980_000 picoseconds. + Weight::from_parts(14_290_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `67035` + // Minimum execution time: 3_770_000 picoseconds. 
+ Weight::from_parts(4_001_000, 67035) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 10_900_000 picoseconds. + Weight::from_parts(11_251_000, 3599) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3762` + // Minimum execution time: 17_891_000 picoseconds. + Weight::from_parts(18_501_000, 3762) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 18_271_000 picoseconds. + Weight::from_parts(18_740_000, 3731) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 21_241_000 picoseconds. + Weight::from_parts(21_911_000, 3731) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 22_740_000 picoseconds. 
+ Weight::from_parts(23_231_000, 3731) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 440_000 picoseconds. + Weight::from_parts(500_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_751_000 picoseconds. + Weight::from_parts(5_950_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_350_000 picoseconds. + Weight::from_parts(6_560_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `67035` + // Minimum execution time: 11_121_000 picoseconds. + Weight::from_parts(11_530_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. + fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1089 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 21_891_000 picoseconds. 
+		Weight::from_parts(18_572_306, 3834)
+			// Standard Error: 3_236
+			.saturating_add(Weight::from_parts(1_648_429, 0).saturating_mul(n.into()))
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into())))
+			.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into())))
+			.saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into()))
+	}
+}
diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml
index e0334b6692dcc1d0d203f9eec5ef998cd4a213cb..d1bb01dde1a47e70587dc37ad27123a0632cdcf7 100644
--- a/substrate/frame/mixnet/Cargo.toml
+++ b/substrate/frame/mixnet/Cargo.toml
@@ -22,7 +22,7 @@ frame-support = { default-features = false, path = "../support" }
 frame-system = { default-features = false, path = "../system" }
 log = { workspace = true }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", default-features = false, features = ["derive"] }
+serde = { features = ["derive"], workspace = true }
 sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" }
 sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" }
 sp-io = { default-features = false, path = "../../primitives/io" }
diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs
index e4426c64b4125fe267a1e30a42a7196274b05e4e..a83b78e316f500ddc9c615420c2ac627b90ee7e9 100644
--- a/substrate/frame/multisig/src/lib.rs
+++ b/substrate/frame/multisig/src/lib.rs
@@ -168,7 +168,7 @@ pub mod pallet {
 		type WeightInfo: WeightInfo;
 	}
 
-	/// The current storage version.
+	/// The in-code storage version.
 	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
 
 	#[pallet::pallet]
diff --git a/substrate/frame/multisig/src/migrations.rs b/substrate/frame/multisig/src/migrations.rs
index d03e42a66a5b5af07248ee7b11061d5f26b66c98..e6402600d0d368783e30f43e0afc3adad4d0a1ba 100644
--- a/substrate/frame/multisig/src/migrations.rs
+++ b/substrate/frame/multisig/src/migrations.rs
@@ -51,7 +51,7 @@ pub mod v1 {
 		fn on_runtime_upgrade() -> Weight {
 			use sp_runtime::Saturating;
 
-			let current = Pallet::<T>::current_storage_version();
+			let current = Pallet::<T>::in_code_storage_version();
 			let onchain = Pallet::<T>::on_chain_storage_version();
 
 			if onchain > 0 {
diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs
index a7d505e2e397dbca5f547ed9ed1a7c3cc4235aa5..615720268fed611bbf7effa1f4b71eca0df4a7e7 100644
--- a/substrate/frame/nfts/src/lib.rs
+++ b/substrate/frame/nfts/src/lib.rs
@@ -76,7 +76,7 @@ pub mod pallet {
 	use frame_support::{pallet_prelude::*, traits::ExistenceRequirement};
 	use frame_system::pallet_prelude::*;
 
-	/// The current storage version.
+	/// The in-code storage version.
	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);

	#[pallet::pallet]
@@ -874,8 +874,8 @@ pub mod pallet {
				),
			);
			Self::deposit_event(Event::PalletAttributeSet {
-				collection,
-				item: Some(item),
+				collection: collection_id,
+				item: Some(owned_item),
				attribute: pallet_attribute,
				value: attribute_value,
			});
diff --git a/substrate/frame/nfts/src/migration.rs b/substrate/frame/nfts/src/migration.rs
index d4cdf7e820d15b7df1d8f36807baf34b81c34bd3..8f82e092262fb2d63cf1dda563afb0dd11c6f6ac 100644
--- a/substrate/frame/nfts/src/migration.rs
+++ b/substrate/frame/nfts/src/migration.rs
@@ -54,17 +54,17 @@ pub mod v1 {
	pub struct MigrateToV1<T>(core::marker::PhantomData<T>);
	impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
		fn on_runtime_upgrade() -> Weight {
-			let current_version = Pallet::<T>::current_storage_version();
-			let onchain_version = Pallet::<T>::on_chain_storage_version();
+			let in_code_version = Pallet::<T>::in_code_storage_version();
+			let on_chain_version = Pallet::<T>::on_chain_storage_version();
			log::info!(
				target: LOG_TARGET,
-				"Running migration with current storage version {:?} / onchain {:?}",
-				current_version,
-				onchain_version
+				"Running migration with in-code storage version {:?} / onchain {:?}",
+				in_code_version,
+				on_chain_version
			);
-			if onchain_version == 0 && current_version == 1 {
+			if on_chain_version == 0 && in_code_version == 1 {
				let mut translated = 0u64;
				let mut configs_iterated = 0u64;
				Collection::<T>::translate::<
@@ -77,13 +77,13 @@ pub mod v1 {
					Some(old_value.migrate_to_v1(item_configs))
				});
-				current_version.put::<Pallet<T>>();
+				in_code_version.put::<Pallet<T>>();
				log::info!(
					target: LOG_TARGET,
					"Upgraded {} records, storage to version {:?}",
					translated,
-					current_version
+					in_code_version
				);
				T::DbWeight::get().reads_writes(translated + configs_iterated + 1, translated + 1)
			} else {
diff --git a/substrate/frame/nfts/src/tests.rs b/substrate/frame/nfts/src/tests.rs
index 9e521537534fab31c4626c21ecf06e3df366c01b..6bf9427f4e6cda9ef2eecaf9abed2721e7127ecf 100644
--- a/substrate/frame/nfts/src/tests.rs
+++ b/substrate/frame/nfts/src/tests.rs
@@ -440,6 +440,12 @@ fn mint_should_work() {
			account(2),
			Some(MintWitness { owned_item: Some(43), ..Default::default() })
		));
+		assert!(events().contains(&Event::<Test>::PalletAttributeSet {
+			collection: 0,
+			item: Some(43),
+			attribute: PalletAttributes::<<Test as Config>::CollectionId>::UsedToClaim(1),
+			value: Nfts::construct_attribute_value(vec![]).unwrap(),
+		}));

		// can't mint twice
		assert_noop!(
diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs
index 074d59931ade733a3199e227a99c6dff819be793..a0dbdb3aaa47f6108d6b1f0fec92dc4d6826d624 100644
--- a/substrate/frame/nomination-pools/src/lib.rs
+++ b/substrate/frame/nomination-pools/src/lib.rs
@@ -1576,7 +1576,7 @@ pub mod pallet {
	use frame_system::{ensure_signed, pallet_prelude::*};
	use sp_runtime::Perbill;

-	/// The current storage version.
+	/// The in-code storage version.
	const STORAGE_VERSION: StorageVersion = StorageVersion::new(8);

	#[pallet::pallet]
@@ -2401,6 +2401,11 @@ pub mod pallet {
		///
		/// This directly forwards the call to the staking pallet, on behalf of the pool's bonded
		/// account.
+		///
+		/// # Note
+		///
+		/// In addition to `origin` having a `root` or `nominator` role, the pool's depositor
+		/// needs to have at least `depositor_min_bond` in the pool to start nominating.
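+		///
+		/// For illustration only, a hedged sketch of what this means for a caller; it mirrors
+		/// the `pool_chill_e2e` test added further below (the account ids, pool id, and
+		/// amounts are example values taken from that test):
+		/// ```ignore
+		/// // While the depositor's bonded funds are below `depositor_min_bond`:
+		/// assert_noop!(
+		/// 	Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
+		/// 	Error::<Runtime>::MinimumBondNotMet,
+		/// );
+		/// // Once the depositor bonds extra funds, nominating works again:
+		/// assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10)));
+		/// assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+		/// ```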
		#[pallet::call_index(8)]
		#[pallet::weight(T::WeightInfo::nominate(validators.len() as u32))]
		pub fn nominate(
@@ -2411,6 +2416,16 @@ pub mod pallet {
			let who = ensure_signed(origin)?;
			let bonded_pool = BondedPool::<T>::get(pool_id).ok_or(Error::<T>::PoolNotFound)?;
			ensure!(bonded_pool.can_nominate(&who), Error::<T>::NotNominator);
+
+			let depositor_points = PoolMembers::<T>::get(&bonded_pool.roles.depositor)
+				.ok_or(Error::<T>::PoolMemberNotFound)?
+				.active_points();
+
+			ensure!(
+				bonded_pool.points_to_balance(depositor_points) >= Self::depositor_min_bond(),
+				Error::<T>::MinimumBondNotMet
+			);
+
			T::Staking::nominate(&bonded_pool.bonded_account(), validators)
		}
@@ -2573,17 +2588,37 @@ pub mod pallet {
		/// Chill on behalf of the pool.
		///
-		/// The dispatch origin of this call must be signed by the pool nominator or the pool
+		/// The dispatch origin of this call can be signed by the pool nominator or the pool
		/// root role, same as [`Pallet::nominate`].
		///
+		/// Under certain conditions, this call can be dispatched permissionlessly (i.e. by any
+		/// account).
+		///
+		/// # Conditions for a permissionless dispatch:
+		/// * The pool's depositor has less than `MinNominatorBond` staked; otherwise, pool
+		///   members would be unable to unbond.
+		///
+		/// # Conditions for permissioned dispatch:
+		/// * The caller has the nominator or root role of the pool.
+		///
		/// This directly forwards the call to the staking pallet, on behalf of the pool's bonded
		/// account.
		#[pallet::call_index(13)]
		#[pallet::weight(T::WeightInfo::chill())]
		pub fn chill(origin: OriginFor<T>, pool_id: PoolId) -> DispatchResult {
			let who = ensure_signed(origin)?;
+
			let bonded_pool = BondedPool::<T>::get(pool_id).ok_or(Error::<T>::PoolNotFound)?;
-			ensure!(bonded_pool.can_nominate(&who), Error::<T>::NotNominator);
+
+			let depositor_points = PoolMembers::<T>::get(&bonded_pool.roles.depositor)
+				.ok_or(Error::<T>::PoolMemberNotFound)?
+				.active_points();
+
+			if bonded_pool.points_to_balance(depositor_points) >=
+				T::Staking::minimum_nominator_bond()
+			{
+				ensure!(bonded_pool.can_nominate(&who), Error::<T>::NotNominator);
+			}
+
			T::Staking::chill(&bonded_pool.bonded_account())
		}
diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs
index 6887fcfa7ecad36172c17e36d55713df27865482..14410339f59a4dc809187feb6d289b16ec52f79c 100644
--- a/substrate/frame/nomination-pools/src/migration.rs
+++ b/substrate/frame/nomination-pools/src/migration.rs
@@ -342,25 +342,25 @@ pub mod v5 {
	pub struct MigrateToV5<T>(sp_std::marker::PhantomData<T>);
	impl<T: Config> OnRuntimeUpgrade for MigrateToV5<T> {
		fn on_runtime_upgrade() -> Weight {
-			let current = Pallet::<T>::current_storage_version();
+			let in_code = Pallet::<T>::in_code_storage_version();
			let onchain = Pallet::<T>::on_chain_storage_version();

			log!(
				info,
-				"Running migration with current storage version {:?} / onchain {:?}",
-				current,
+				"Running migration with in-code storage version {:?} / onchain {:?}",
+				in_code,
				onchain
			);

-			if current == 5 && onchain == 4 {
+			if in_code == 5 && onchain == 4 {
				let mut translated = 0u64;

				RewardPools::<T>::translate::<OldRewardPool<T>, _>(|_id, old_value| {
					translated.saturating_inc();
					Some(old_value.migrate_to_v5())
				});

-				current.put::<Pallet<T>>();
-				log!(info, "Upgraded {} pools, storage to version {:?}", translated, current);
+				in_code.put::<Pallet<T>>();
+				log!(info, "Upgraded {} pools, storage to version {:?}", translated, in_code);

				// reads: translated + onchain version.
				// writes: translated + in_code.put.
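The `current_storage_version()` to `in_code_storage_version()` rename running through this and the following migration hunks is purely mechanical; the guard around each translation stays the same. For orientation, the shared pattern condenses to roughly the sketch below. It is not a verbatim copy of any one migration: the generics are restored, the pallet-specific translation is elided, and `Pallet`, `Config`, and the version numbers stand in for the enclosing pallet's own items.

```rust
use frame_support::{
	traits::{GetStorageVersion, OnRuntimeUpgrade},
	weights::Weight,
};

pub struct MigrateToV5<T>(core::marker::PhantomData<T>);

impl<T: Config> OnRuntimeUpgrade for MigrateToV5<T> {
	fn on_runtime_upgrade() -> Weight {
		// Version declared by the pallet code (formerly `current_storage_version`).
		let in_code = Pallet::<T>::in_code_storage_version();
		// Version recorded on chain by the last migration that ran.
		let onchain = Pallet::<T>::on_chain_storage_version();

		if in_code == 5 && onchain == 4 {
			// ... translate the affected storage items here ...

			// Record the new version on chain so the migration never re-runs.
			in_code.put::<Pallet<T>>();
			T::DbWeight::get().reads_writes(1, 1)
		} else {
			// Versions do not straddle the expected boundary; nothing to do.
			T::DbWeight::get().reads(1)
		}
	}
}
```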
@@ -498,12 +498,12 @@ pub mod v4 {
	#[allow(deprecated)]
	impl<T: Config, U: Get<Perbill>> OnRuntimeUpgrade for MigrateToV4<T, U> {
		fn on_runtime_upgrade() -> Weight {
-			let current = Pallet::<T>::current_storage_version();
+			let current = Pallet::<T>::in_code_storage_version();
			let onchain = Pallet::<T>::on_chain_storage_version();

			log!(
				info,
-				"Running migration with current storage version {:?} / onchain {:?}",
+				"Running migration with in-code storage version {:?} / onchain {:?}",
				current,
				onchain
			);
@@ -579,13 +579,13 @@ pub mod v3 {
	pub struct MigrateToV3<T>(sp_std::marker::PhantomData<T>);
	impl<T: Config> OnRuntimeUpgrade for MigrateToV3<T> {
		fn on_runtime_upgrade() -> Weight {
-			let current = Pallet::<T>::current_storage_version();
+			let current = Pallet::<T>::in_code_storage_version();
			let onchain = Pallet::<T>::on_chain_storage_version();

			if onchain == 2 {
				log!(
					info,
-					"Running migration with current storage version {:?} / onchain {:?}",
+					"Running migration with in-code storage version {:?} / onchain {:?}",
					current,
					onchain
				);
@@ -859,12 +859,12 @@ pub mod v2 {
	impl<T: Config> OnRuntimeUpgrade for MigrateToV2<T> {
		fn on_runtime_upgrade() -> Weight {
-			let current = Pallet::<T>::current_storage_version();
+			let current = Pallet::<T>::in_code_storage_version();
			let onchain = Pallet::<T>::on_chain_storage_version();

			log!(
				info,
-				"Running migration with current storage version {:?} / onchain {:?}",
+				"Running migration with in-code storage version {:?} / onchain {:?}",
				current,
				onchain
			);
@@ -976,12 +976,12 @@ pub mod v1 {
	pub struct MigrateToV1<T>(sp_std::marker::PhantomData<T>);
	impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
		fn on_runtime_upgrade() -> Weight {
-			let current = Pallet::<T>::current_storage_version();
+			let current = Pallet::<T>::in_code_storage_version();
			let onchain = Pallet::<T>::on_chain_storage_version();

			log!(
				info,
-				"Running migration with current storage version {:?} / onchain {:?}",
+				"Running migration with in-code storage version {:?} / onchain {:?}",
				current,
				onchain
			);
diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs
index 13298fa64f54c0439384fcb8fba7b512678ff687..8fb2b41b88a190df444162a58d0a134936d81272 100644
--- a/substrate/frame/nomination-pools/src/tests.rs
+++ b/substrate/frame/nomination-pools/src/tests.rs
@@ -4848,6 +4848,18 @@ mod nominate {
			Error::<Runtime>::NotNominator
		);

+		// if the depositor's stake is less than the `MinimumNominatorBond`, they can't nominate
+		StakingMinBond::set(20);
+
+		// Can't nominate while the depositor's stake is below the `MinimumNominatorBond`
+		assert_noop!(
+			Pools::nominate(RuntimeOrigin::signed(900), 1, vec![21]),
+			Error::<Runtime>::MinimumBondNotMet
+		);
+
+		// restore `MinimumNominatorBond`
+		StakingMinBond::set(10);
+
		// Root can nominate
		assert_ok!(Pools::nominate(RuntimeOrigin::signed(900), 1, vec![21]));
		assert_eq!(Nominations::get().unwrap(), vec![21]);
@@ -7338,3 +7350,33 @@ mod slash {
		});
	}
}
+
+mod chill {
+	use super::*;
+
+	#[test]
+	fn chill_works() {
+		ExtBuilder::default().build_and_execute(|| {
+			// only the nominator or root role can chill
+			assert_noop!(
+				Pools::chill(RuntimeOrigin::signed(10), 1),
+				Error::<Runtime>::NotNominator
+			);
+
+			// root can chill and re-nominate
+			assert_ok!(Pools::chill(RuntimeOrigin::signed(900), 1));
+			assert_ok!(Pools::nominate(RuntimeOrigin::signed(900), 1, vec![31]));
+
+			// nominator can chill and re-nominate
+			assert_ok!(Pools::chill(RuntimeOrigin::signed(901), 1));
+			assert_ok!(Pools::nominate(RuntimeOrigin::signed(901), 1, vec![31]));
+
+			// if the depositor's stake is less than the `MinimumNominatorBond`, then this call
+			// becomes permissionless:
StakingMinBond::set(20); + + // any account can chill + assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1)); + }) + } +} diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 047a17c3f9a278ac9564562edf318f13ecc2f7ca..0b8c1d22fa19872711fead9fd7441b26d97e19d6 100644 --- a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -17,14 +17,15 @@ //! Autogenerated weights for `pallet_nomination_pools` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: // target/production/substrate-node +// target/production/substrate-node // benchmark // pallet // --steps=50 @@ -35,8 +36,12 @@ // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_nomination_pools // --chain=dev +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_nomination_pools +// --chain=dev // --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/nomination-pools/src/weights.rs +// --output=./substrate/frame/nomination-pools/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -112,8 +117,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 184_295_000 picoseconds. - Weight::from_parts(188_860_000, 8877) + // Minimum execution time: 182_643_000 picoseconds. + Weight::from_parts(186_106_000, 8877) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -145,8 +150,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 188_777_000 picoseconds. - Weight::from_parts(192_646_000, 8877) + // Minimum execution time: 181_464_000 picoseconds. + Weight::from_parts(184_672_000, 8877) .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -180,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 221_728_000 picoseconds. - Weight::from_parts(227_569_000, 8877) + // Minimum execution time: 216_182_000 picoseconds. + Weight::from_parts(218_448_000, 8877) .saturating_add(T::DbWeight::get().reads(18_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)) } @@ -201,8 +206,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 75_310_000 picoseconds. - Weight::from_parts(77_709_000, 3719) + // Minimum execution time: 73_830_000 picoseconds. 
+ Weight::from_parts(75_271_000, 3719) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -242,8 +247,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 170_656_000 picoseconds. - Weight::from_parts(174_950_000, 27847) + // Minimum execution time: 166_099_000 picoseconds. + Weight::from_parts(170_355_000, 27847) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -266,11 +271,11 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1817` // Estimated: `4764` - // Minimum execution time: 68_866_000 picoseconds. - Weight::from_parts(72_312_887, 4764) - // Standard Error: 1_635 - .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Minimum execution time: 64_787_000 picoseconds. + Weight::from_parts(67_920_914, 4764) + // Standard Error: 1_482 + .saturating_add(Weight::from_parts(43_264, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -302,11 +307,11 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2207` // Estimated: `27847` - // Minimum execution time: 131_383_000 picoseconds. - Weight::from_parts(136_595_971, 27847) - // Standard Error: 2_715 - .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Minimum execution time: 124_990_000 picoseconds. + Weight::from_parts(129_041_398, 27847) + // Standard Error: 2_335 + .saturating_add(Weight::from_parts(67_889, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -356,12 +361,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 233_314_000 picoseconds. - Weight::from_parts(241_694_316, 27847) + // Minimum execution time: 221_955_000 picoseconds. + Weight::from_parts(230_244_437, 27847) + // Standard Error: 4_059 + .saturating_add(Weight::from_parts(7_522, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20_u64)) } @@ -413,19 +420,25 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 171_465_000 picoseconds. - Weight::from_parts(176_478_000, 8538) + // Minimum execution time: 165_358_000 picoseconds. 
+ Weight::from_parts(169_683_000, 8538) .saturating_add(T::DbWeight::get().reads(23_u64)) .saturating_add(T::DbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:1 w:0) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) @@ -445,13 +458,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1808` + // Measured: `1976` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 63_588_000 picoseconds. - Weight::from_parts(64_930_584, 4556) - // Standard Error: 9_167 - .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Minimum execution time: 74_483_000 picoseconds. + Weight::from_parts(76_611_288, 4556) + // Standard Error: 9_013 + .saturating_add(Weight::from_parts(1_475_128, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) @@ -466,8 +479,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 32_899_000 picoseconds. - Weight::from_parts(33_955_000, 4556) + // Minimum execution time: 32_598_000 picoseconds. + Weight::from_parts(33_350_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -482,10 +495,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 13_778_000 picoseconds. - Weight::from_parts(14_770_006, 3735) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) + // Minimum execution time: 13_705_000 picoseconds. 
+ Weight::from_parts(14_594_211, 3735) + // Standard Error: 141 + .saturating_add(Weight::from_parts(2_119, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -505,8 +518,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_550_000 picoseconds. - Weight::from_parts(4_935_000, 0) + // Minimum execution time: 4_222_000 picoseconds. + Weight::from_parts(4_527_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -515,17 +528,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_759_000 picoseconds. - Weight::from_parts(17_346_000, 3719) + // Minimum execution time: 16_106_000 picoseconds. + Weight::from_parts(17_365_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -540,11 +557,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1971` + // Measured: `2143` // Estimated: `4556` - // Minimum execution time: 61_970_000 picoseconds. - Weight::from_parts(63_738_000, 4556) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Minimum execution time: 71_301_000 picoseconds. + Weight::from_parts(73_626_000, 4556) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -559,8 +576,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 31_950_000 picoseconds. - Weight::from_parts(33_190_000, 3719) + // Minimum execution time: 32_363_000 picoseconds. + Weight::from_parts(33_400_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -572,8 +589,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 16_807_000 picoseconds. - Weight::from_parts(17_733_000, 3719) + // Minimum execution time: 16_686_000 picoseconds. 
+ Weight::from_parts(17_294_000, 3719) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -583,8 +600,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_710_000 picoseconds. - Weight::from_parts(17_563_000, 3719) + // Minimum execution time: 16_417_000 picoseconds. + Weight::from_parts(17_028_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -594,8 +611,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_493_000 picoseconds. - Weight::from_parts(17_022_000, 3719) + // Minimum execution time: 16_350_000 picoseconds. + Weight::from_parts(16_905_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -607,8 +624,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_248_000 picoseconds. - Weight::from_parts(15_095_000, 3702) + // Minimum execution time: 14_081_000 picoseconds. + Weight::from_parts(14_608_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -624,8 +641,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 61_969_000 picoseconds. - Weight::from_parts(63_965_000, 3719) + // Minimum execution time: 62_178_000 picoseconds. + Weight::from_parts(64_039_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -641,8 +658,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 65_462_000 picoseconds. - Weight::from_parts(67_250_000, 4764) + // Minimum execution time: 64_880_000 picoseconds. + Weight::from_parts(66_541_000, 4764) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -686,8 +703,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 184_295_000 picoseconds. - Weight::from_parts(188_860_000, 8877) + // Minimum execution time: 182_643_000 picoseconds. + Weight::from_parts(186_106_000, 8877) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -719,8 +736,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 188_777_000 picoseconds. - Weight::from_parts(192_646_000, 8877) + // Minimum execution time: 181_464_000 picoseconds. + Weight::from_parts(184_672_000, 8877) .saturating_add(RocksDbWeight::get().reads(17_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -754,8 +771,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 221_728_000 picoseconds. - Weight::from_parts(227_569_000, 8877) + // Minimum execution time: 216_182_000 picoseconds. 
+ Weight::from_parts(218_448_000, 8877) .saturating_add(RocksDbWeight::get().reads(18_u64)) .saturating_add(RocksDbWeight::get().writes(14_u64)) } @@ -775,8 +792,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 75_310_000 picoseconds. - Weight::from_parts(77_709_000, 3719) + // Minimum execution time: 73_830_000 picoseconds. + Weight::from_parts(75_271_000, 3719) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -816,8 +833,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 170_656_000 picoseconds. - Weight::from_parts(174_950_000, 27847) + // Minimum execution time: 166_099_000 picoseconds. + Weight::from_parts(170_355_000, 27847) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -840,11 +857,11 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1817` // Estimated: `4764` - // Minimum execution time: 68_866_000 picoseconds. - Weight::from_parts(72_312_887, 4764) - // Standard Error: 1_635 - .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Minimum execution time: 64_787_000 picoseconds. + Weight::from_parts(67_920_914, 4764) + // Standard Error: 1_482 + .saturating_add(Weight::from_parts(43_264, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -876,11 +893,11 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2207` // Estimated: `27847` - // Minimum execution time: 131_383_000 picoseconds. - Weight::from_parts(136_595_971, 27847) - // Standard Error: 2_715 - .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Minimum execution time: 124_990_000 picoseconds. + Weight::from_parts(129_041_398, 27847) + // Standard Error: 2_335 + .saturating_add(Weight::from_parts(67_889, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -930,12 +947,14 @@ impl WeightInfo for () { /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 233_314_000 picoseconds. - Weight::from_parts(241_694_316, 27847) + // Minimum execution time: 221_955_000 picoseconds. + Weight::from_parts(230_244_437, 27847) + // Standard Error: 4_059 + .saturating_add(Weight::from_parts(7_522, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(24_u64)) .saturating_add(RocksDbWeight::get().writes(20_u64)) } @@ -987,19 +1006,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 171_465_000 picoseconds. - Weight::from_parts(176_478_000, 8538) + // Minimum execution time: 165_358_000 picoseconds. 
+ Weight::from_parts(169_683_000, 8538) .saturating_add(RocksDbWeight::get().reads(23_u64)) .saturating_add(RocksDbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:1 w:0) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) @@ -1019,13 +1044,13 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1808` + // Measured: `1976` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 63_588_000 picoseconds. - Weight::from_parts(64_930_584, 4556) - // Standard Error: 9_167 - .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Minimum execution time: 74_483_000 picoseconds. + Weight::from_parts(76_611_288, 4556) + // Standard Error: 9_013 + .saturating_add(Weight::from_parts(1_475_128, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) @@ -1040,8 +1065,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 32_899_000 picoseconds. - Weight::from_parts(33_955_000, 4556) + // Minimum execution time: 32_598_000 picoseconds. + Weight::from_parts(33_350_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1056,10 +1081,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 13_778_000 picoseconds. - Weight::from_parts(14_770_006, 3735) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) + // Minimum execution time: 13_705_000 picoseconds. 
+ Weight::from_parts(14_594_211, 3735) + // Standard Error: 141 + .saturating_add(Weight::from_parts(2_119, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1079,8 +1104,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_550_000 picoseconds. - Weight::from_parts(4_935_000, 0) + // Minimum execution time: 4_222_000 picoseconds. + Weight::from_parts(4_527_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -1089,17 +1114,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_759_000 picoseconds. - Weight::from_parts(17_346_000, 3719) + // Minimum execution time: 16_106_000 picoseconds. + Weight::from_parts(17_365_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -1114,11 +1143,11 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1971` + // Measured: `2143` // Estimated: `4556` - // Minimum execution time: 61_970_000 picoseconds. - Weight::from_parts(63_738_000, 4556) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Minimum execution time: 71_301_000 picoseconds. + Weight::from_parts(73_626_000, 4556) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -1133,8 +1162,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 31_950_000 picoseconds. - Weight::from_parts(33_190_000, 3719) + // Minimum execution time: 32_363_000 picoseconds. + Weight::from_parts(33_400_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1146,8 +1175,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 16_807_000 picoseconds. - Weight::from_parts(17_733_000, 3719) + // Minimum execution time: 16_686_000 picoseconds. 
+ Weight::from_parts(17_294_000, 3719) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1157,8 +1186,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_710_000 picoseconds. - Weight::from_parts(17_563_000, 3719) + // Minimum execution time: 16_417_000 picoseconds. + Weight::from_parts(17_028_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1168,8 +1197,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_493_000 picoseconds. - Weight::from_parts(17_022_000, 3719) + // Minimum execution time: 16_350_000 picoseconds. + Weight::from_parts(16_905_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1181,8 +1210,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_248_000 picoseconds. - Weight::from_parts(15_095_000, 3702) + // Minimum execution time: 14_081_000 picoseconds. + Weight::from_parts(14_608_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1198,8 +1227,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 61_969_000 picoseconds. - Weight::from_parts(63_965_000, 3719) + // Minimum execution time: 62_178_000 picoseconds. + Weight::from_parts(64_039_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1215,8 +1244,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 65_462_000 picoseconds. - Weight::from_parts(67_250_000, 4764) + // Minimum execution time: 64_880_000 picoseconds. + Weight::from_parts(66_541_000, 4764) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/nomination-pools/test-staking/src/lib.rs b/substrate/frame/nomination-pools/test-staking/src/lib.rs index 865b7a71e68840d00b42c7cf11e6916c8debf5ad..98bff537c90819be34625d099cb55abcaafb10ae 100644 --- a/substrate/frame/nomination-pools/test-staking/src/lib.rs +++ b/substrate/frame/nomination-pools/test-staking/src/lib.rs @@ -22,10 +22,12 @@ mod mock; use frame_support::{assert_noop, assert_ok, traits::Currency}; use mock::*; use pallet_nomination_pools::{ - BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, PoolMembers, - PoolState, + BondExtra, BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, + PoolMembers, PoolState, +}; +use pallet_staking::{ + CurrentEra, Error as StakingError, Event as StakingEvent, Payee, RewardDestination, }; -use pallet_staking::{CurrentEra, Event as StakingEvent, Payee, RewardDestination}; use sp_runtime::{bounded_btree_map, traits::Zero}; #[test] @@ -191,6 +193,131 @@ fn pool_lifecycle_e2e() { }) } +#[test] +fn pool_chill_e2e() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::minimum_balance(), 5); + assert_eq!(Staking::current_era(), None); + + // create the pool, we know this has id 1. + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); + assert_eq!(LastPoolId::::get(), 1); + + // have the pool nominate. 
+		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
+			]
+		);
+
+		// have two members join
+		assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1));
+		assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true },
+			]
+		);
+
+		// If the depositor does not have more than `MinNominatorBond` staked, a member's unbond
+		// could drop the pool's balance below `MinNominatorBond` and would therefore not be
+		// allowed. This can happen if `MinNominatorBond` is increased after the pool is created.
+		assert_ok!(Staking::set_staking_configs(
+			RuntimeOrigin::root(),
+			pallet_staking::ConfigOp::Set(55), // minimum nominator bond
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+		));
+
+		// members can unbond as long as the total stake of the pool stays above the min
+		// nominator bond
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10),);
+		assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().unbonding_eras.len(), 1);
+		assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 0);
+
+		// this member cannot unbond since it would cause `pool stake < MinNominatorBond`
+		assert_noop!(
+			Pools::unbond(RuntimeOrigin::signed(21), 21, 10),
+			StakingError::<Runtime>::InsufficientBond,
+		);
+
+		// members can call `chill` permissionlessly now
+		assert_ok!(Pools::chill(RuntimeOrigin::signed(20), 1));
+
+		// now another member can unbond.
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+		assert_eq!(PoolMembers::<Runtime>::get(21).unwrap().unbonding_eras.len(), 1);
+		assert_eq!(PoolMembers::<Runtime>::get(21).unwrap().points, 0);
+
+		// the nominator cannot resume nomination until the depositor has enough stake
+		assert_noop!(
+			Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
+			PoolsError::<Runtime>::MinimumBondNotMet,
+		);
+
+		// other members joining the pool do not affect the depositor's ability to resume
+		// nomination
+		assert_ok!(Pools::join(RuntimeOrigin::signed(22), 10, 1));
+
+		assert_noop!(
+			Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
+			PoolsError::<Runtime>::MinimumBondNotMet,
+		);
+
+		// the depositor can bond extra stake
+		assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10)));
+
+		// `chill` cannot be called permissionlessly anymore
+		assert_noop!(
+			Pools::chill(RuntimeOrigin::signed(20), 1),
+			PoolsError::<Runtime>::NotNominator,
+		);
+
+		// now the nominator can resume nomination
+		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+		// skip to make the unbonding period end.
+		CurrentEra::<Runtime>::set(Some(BondingDuration::get()));
+
+		// members can now withdraw.
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Chilled { stash: POOL1_BONDED }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // other member bonding + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // depositor bond extra + StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 }, + ] + ); + }) +} + #[test] fn pool_slash_e2e() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index e1bf71f70dfb8b607f6e2e40738dad326434854b..b3ef4671ce56f4b66d7e7cb4ce7ec7faec05d3fe 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-balances = { path = "../balances", default-features = false } diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index 82059e3a73dcbb26ca4eb90d74a412f00a0f154d..07ebeea52d5298be0db8b00c09ab73641d20080b 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -11,7 +11,7 @@ edition.workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["max-encoded-len"] } scale-info = { version = "2.1.2", default-features = false, features = ["derive"] } paste = { version = "1.0.14", default-features = false } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } docify = "0.2.5" frame-support = { path = "../support", default-features = false, features = ["experimental"] } diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index e344bdfe2d8feff5a25ed686d7301f462fa41189..4e474685166631ba41eed644d3754a332a290663 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -102,7 +102,7 @@ pub const MAX_HASH_UPGRADE_BULK_COUNT: u32 = 1024; pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::config] diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index 13c8cfc596175fe474f44e40bb85620b69c681af..2dfb25a2fd3a5b50bdc7828e50c8e247cd7f936a 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index c5bf2266e672542b77c79bee71176f8e526018d4..e616056c3022abe796cd494e91fa4e9565843e60 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -143,7 +143,7 @@ pub mod pallet { use frame_support::{pallet_prelude::*, traits::EnsureOriginWithArg}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/referenda/src/migration.rs b/substrate/frame/referenda/src/migration.rs index a80897242eec67aad194f8deaf1cd6527533eab9..631eb7340e567e8115e1755d513980fb148700c5 100644 --- a/substrate/frame/referenda/src/migration.rs +++ b/substrate/frame/referenda/src/migration.rs @@ -109,16 +109,16 @@ pub mod v1 { } fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); let mut weight = T::DbWeight::get().reads(1); log::info!( target: TARGET, - "running migration with current storage version {:?} / onchain {:?}.", - current_version, - onchain_version + "running migration with in-code storage version {:?} / onchain {:?}.", + in_code_version, + on_chain_version ); - if onchain_version != 0 { + if on_chain_version != 0 { log::warn!(target: TARGET, "skipping migration from v0 to v1."); return weight } @@ -149,8 +149,8 @@ pub mod v1 { #[cfg(feature = "try-runtime")] fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version == 1, "must upgrade from version 0 to 1."); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version == 1, "must upgrade from version 0 to 1."); let pre_referendum_count: u32 = Decode::decode(&mut &state[..]) .expect("failed to decode the state from pre-upgrade."); let post_referendum_count = ReferendumInfoFor::::iter().count() as u32; diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index c5f39a649ecdd07c0b1265c6dead34710444e233..45710f0539e255b48caf4e7dce9a99f46600d4e5 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", 
default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index daebebdee9956a2317ca6d755999797a52ac42ce..af3abd8ac4f2a81b363c4553f3652a2cba972212 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -229,7 +229,7 @@ pub mod pallet { use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index d74e9dd0b7c54042840c62feff0693359f97c6c9..b9cecea1a7f7144fb7f846548b827455dce0a1e5 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -58,7 +58,7 @@ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 178d43f596b2742af69d35437592d78bd030a63d..7d8128dbc1fe64c647994f6da901d4b3bd9e008f 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -368,7 +368,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] @@ -460,27 +460,13 @@ pub mod pallet { ); self.keys.iter().map(|x| x.1.clone()).collect() }); - assert!( - !initial_validators_0.is_empty(), - "Empty validator set for session 0 in genesis block!" - ); let initial_validators_1 = T::SessionManager::new_session_genesis(1) .unwrap_or_else(|| initial_validators_0.clone()); - assert!( - !initial_validators_1.is_empty(), - "Empty validator set for session 1 in genesis block!" 
- ); let queued_keys: Vec<_> = initial_validators_1 - .iter() - .cloned() - .map(|v| { - ( - v.clone(), - Pallet::::load_keys(&v).expect("Validator in session 1 missing keys!"), - ) - }) + .into_iter() + .filter_map(|v| Pallet::::load_keys(&v).map(|k| (v, k))) .collect(); // Tell everyone about the genesis session keys diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index dafb1e0b9e5e99706de94b1a4fdd1808343af29d..6a1029114519db8c41c56301197d04de26bfb6c8 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -40,11 +40,11 @@ impl< { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { - let current = Pallet::::current_storage_version(); - let onchain = Pallet::::on_chain_storage_version(); - ensure!(onchain == 0 && current == 2, "pallet_society: invalid version"); + let in_code = Pallet::::in_code_storage_version(); + let on_chain = Pallet::::on_chain_storage_version(); + ensure!(on_chain == 0 && in_code == 2, "pallet_society: invalid version"); - Ok((old::Candidates::::get(), old::Members::::get()).encode()) + Ok((v0::Candidates::::get(), v0::Members::::get()).encode()) } fn on_runtime_upgrade() -> Weight { @@ -103,7 +103,7 @@ pub type MigrateToV2 = frame_support::migrations::VersionedMi ::DbWeight, >; -pub(crate) mod old { +pub(crate) mod v0 { use super::*; use frame_support::storage_alias; @@ -230,37 +230,37 @@ pub fn assert_internal_consistency, I: Instance + 'static>() { } // We don't use these - make sure they don't exist. - assert_eq!(old::SuspendedCandidates::::iter().count(), 0); - assert_eq!(old::Strikes::::iter().count(), 0); - assert_eq!(old::Vouching::::iter().count(), 0); - assert!(!old::Defender::::exists()); - assert!(!old::Members::::exists()); + assert_eq!(v0::SuspendedCandidates::::iter().count(), 0); + assert_eq!(v0::Strikes::::iter().count(), 0); + assert_eq!(v0::Vouching::::iter().count(), 0); + assert!(!v0::Defender::::exists()); + assert!(!v0::Members::::exists()); } pub fn from_original, I: Instance + 'static>( past_payouts: &mut [(::AccountId, BalanceOf)], ) -> Result { // Migrate Bids from old::Bids (just a trunctation). - Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(old::Bids::::take())); + Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(v0::Bids::::take())); // Initialise round counter. RoundCount::::put(0); // Migrate Candidates from old::Candidates - for Bid { who: candidate, kind, value } in old::Candidates::::take().into_iter() { + for Bid { who: candidate, kind, value } in v0::Candidates::::take().into_iter() { let mut tally = Tally::default(); // Migrate Votes from old::Votes // No need to drain, since we're overwriting values. 
- for (voter, vote) in old::Votes::::iter_prefix(&candidate) { + for (voter, vote) in v0::Votes::::iter_prefix(&candidate) { Votes::::insert( &candidate, &voter, - Vote { approve: vote == old::Vote::Approve, weight: 1 }, + Vote { approve: vote == v0::Vote::Approve, weight: 1 }, ); match vote { - old::Vote::Approve => tally.approvals.saturating_inc(), - old::Vote::Reject => tally.rejections.saturating_inc(), - old::Vote::Skeptic => Skeptic::::put(&voter), + v0::Vote::Approve => tally.approvals.saturating_inc(), + v0::Vote::Reject => tally.rejections.saturating_inc(), + v0::Vote::Skeptic => Skeptic::::put(&voter), } } Candidates::::insert( @@ -271,9 +271,9 @@ pub fn from_original, I: Instance + 'static>( // Migrate Members from old::Members old::Strikes old::Vouching let mut member_count = 0; - for member in old::Members::::take() { - let strikes = old::Strikes::::take(&member); - let vouching = old::Vouching::::take(&member); + for member in v0::Members::::take() { + let strikes = v0::Strikes::::take(&member); + let vouching = v0::Vouching::::take(&member); let record = MemberRecord { index: member_count, rank: 0, strikes, vouching }; Members::::insert(&member, record); MemberByIndex::::insert(member_count, &member); @@ -314,7 +314,7 @@ pub fn from_original, I: Instance + 'static>( // Migrate Payouts from: old::Payouts and raw info (needed since we can't query old chain // state). past_payouts.sort(); - for (who, mut payouts) in old::Payouts::::iter() { + for (who, mut payouts) in v0::Payouts::::iter() { payouts.truncate(T::MaxPayouts::get() as usize); // ^^ Safe since we already truncated. let paid = past_payouts @@ -329,19 +329,19 @@ pub fn from_original, I: Instance + 'static>( } // Migrate SuspendedMembers from old::SuspendedMembers old::Strikes old::Vouching. - for who in old::SuspendedMembers::::iter_keys() { - let strikes = old::Strikes::::take(&who); - let vouching = old::Vouching::::take(&who); + for who in v0::SuspendedMembers::::iter_keys() { + let strikes = v0::Strikes::::take(&who); + let vouching = v0::Vouching::::take(&who); let record = MemberRecord { index: 0, rank: 0, strikes, vouching }; SuspendedMembers::::insert(&who, record); } // Any suspended candidates remaining are rejected. - let _ = old::SuspendedCandidates::::clear(u32::MAX, None); + let _ = v0::SuspendedCandidates::::clear(u32::MAX, None); // We give the current defender the benefit of the doubt. - old::Defender::::kill(); - let _ = old::DefenderVotes::::clear(u32::MAX, None); + v0::Defender::::kill(); + let _ = v0::DefenderVotes::::clear(u32::MAX, None); Ok(T::BlockWeights::get().max_block) } diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 940643168fb41fd723905497935b7cb543b7f7b7..411567e1dedfde46450c0e778bcb14bb00883962 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for the module. use super::*; -use migrations::old; +use migrations::v0; use mock::*; use frame_support::{assert_noop, assert_ok}; @@ -32,41 +32,41 @@ use RuntimeOrigin as Origin; #[test] fn migration_works() { EnvBuilder::new().founded(false).execute(|| { - use old::Vote::*; + use v0::Vote::*; // Initialise the old storage items. 
Founder::<Test>::put(10); Head::<Test>::put(30); - old::Members::<Test>::put(vec![10, 20, 30]); - old::Vouching::<Test>::insert(30, Vouching); - old::Vouching::<Test>::insert(40, Banned); - old::Strikes::<Test>::insert(20, 1); - old::Strikes::<Test>::insert(30, 2); - old::Strikes::<Test>::insert(40, 5); - old::Payouts::<Test>::insert(20, vec![(1, 1)]); - old::Payouts::<Test>::insert( + v0::Members::<Test>::put(vec![10, 20, 30]); + v0::Vouching::<Test>::insert(30, Vouching); + v0::Vouching::<Test>::insert(40, Banned); + v0::Strikes::<Test>::insert(20, 1); + v0::Strikes::<Test>::insert(30, 2); + v0::Strikes::<Test>::insert(40, 5); + v0::Payouts::<Test>::insert(20, vec![(1, 1)]); + v0::Payouts::<Test>::insert( 30, (0..=<Test as Config>::MaxPayouts::get()) .map(|i| (i as u64, i as u64)) .collect::<Vec<_>>(), ); - old::SuspendedMembers::<Test>::insert(40, true); + v0::SuspendedMembers::<Test>::insert(40, true); - old::Defender::<Test>::put(20); - old::DefenderVotes::<Test>::insert(10, Approve); - old::DefenderVotes::<Test>::insert(20, Approve); - old::DefenderVotes::<Test>::insert(30, Reject); + v0::Defender::<Test>::put(20); + v0::DefenderVotes::<Test>::insert(10, Approve); + v0::DefenderVotes::<Test>::insert(20, Approve); + v0::DefenderVotes::<Test>::insert(30, Reject); - old::SuspendedCandidates::<Test>::insert(50, (10, Deposit(100))); + v0::SuspendedCandidates::<Test>::insert(50, (10, Deposit(100))); - old::Candidates::<Test>::put(vec![ + v0::Candidates::<Test>::put(vec![ Bid { who: 60, kind: Deposit(100), value: 200 }, Bid { who: 70, kind: Vouch(30, 30), value: 100 }, ]); - old::Votes::<Test>::insert(60, 10, Approve); - old::Votes::<Test>::insert(70, 10, Reject); - old::Votes::<Test>::insert(70, 20, Approve); - old::Votes::<Test>::insert(70, 30, Approve); + v0::Votes::<Test>::insert(60, 10, Approve); + v0::Votes::<Test>::insert(70, 10, Reject); + v0::Votes::<Test>::insert(70, 20, Approve); + v0::Votes::<Test>::insert(70, 30, Approve); let bids = (0..=<Test as Config>::MaxBids::get()) .map(|i| Bid { @@ -75,7 +75,7 @@ fn migration_works() { value: 10u64 + i as u64, }) .collect::<Vec<_>>(); - old::Bids::<Test>::put(bids); + v0::Bids::<Test>::put(bids); migrations::from_original::<Test, ()>(&mut [][..]).expect("migration failed"); migrations::assert_internal_consistency::<Test, ()>(); diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index a1715ba4900102595ac6ec926a0aeeb2a2be1650..52db7c34bfdc46efcb4008c04ae4f8b185fb3ffb 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -163,6 +163,12 @@ pub mod runtime { ConstU32, ConstU64, ConstU8, }; + /// Primary types used to parameterize `EnsureOrigin` and `EnsureRootWithArg`. + pub use frame_system::{ + EnsureNever, EnsureNone, EnsureRoot, EnsureRootWithSuccess, EnsureSigned, + EnsureSignedBy, + }; + /// Types to define your runtime version. pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion}; @@ -191,7 +197,7 @@ pub mod runtime { // Types often used in the runtime APIs.
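The new `frame_system` re-exports above are the standard building blocks for origin checks. A minimal sketch of how they are typically used when wiring a pallet into a runtime; `pallet_example`, its `ForceOrigin`/`SubmitOrigin` items, and the `Runtime`/`AccountId` aliases are assumptions for illustration:

```rust
use frame_system::{EnsureRoot, EnsureSigned};

impl pallet_example::Config for Runtime {
    // Only the root origin passes this check.
    type ForceOrigin = EnsureRoot<AccountId>;
    // Any signed origin passes, yielding the signing account.
    type SubmitOrigin = EnsureSigned<AccountId>;
}
```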
pub use sp_core::OpaqueMetadata; pub use sp_inherents::{CheckInherentsResult, InherentData}; - pub use sp_runtime::ApplyExtrinsicResult; + pub use sp_runtime::{ApplyExtrinsicResult, ExtrinsicInclusionMode}; pub use frame_system_rpc_runtime_api::*; pub use sp_api::{self, *}; diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 51aa8c79fadacc99f544961da730d339d1dd1f86..d2a46146931b8863ae347277db8076850dc76d68 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index bae730f446fa1dee7338a6e07b994d3c08f577c5..e2a2782db2da1527eff9d9948020fb72bd02e370 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -20,8 +20,8 @@ proc-macro = true [dependencies] proc-macro-crate = "3.0.0" proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["full", "visit"] } +quote = { workspace = true } +syn = { features = ["full", "visit"], workspace = true } [dev-dependencies] sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 98984be9920d782bcfbe4bd1dec733294dc604e7..3e3b1113a8198ba4d1718cf22f36c3568057c0ff 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -67,11 +67,11 @@ pub mod v14 { pub struct MigrateToV14<T>(core::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for MigrateToV14<T> { fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let in_code = Pallet::<T>::in_code_storage_version(); let on_chain = Pallet::<T>::on_chain_storage_version(); - if current == 14 && on_chain == 13 { - current.put::<Pallet<T>>(); + if in_code == 14 && on_chain == 13 { + in_code.put::<Pallet<T>>(); log!(info, "v14 applied successfully."); T::DbWeight::get().reads_writes(1, 1) @@ -108,12 +108,12 @@ } fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let in_code = Pallet::<T>::in_code_storage_version(); let onchain = StorageVersion::<T>::get(); - if current == 13 && onchain == ObsoleteReleases::V12_0_0 { + if in_code == 13 && onchain == ObsoleteReleases::V12_0_0 { StorageVersion::<T>::kill(); - current.put::<Pallet<T>>(); + in_code.put::<Pallet<T>>(); log!(info, "v13 applied successfully"); T::DbWeight::get().reads_writes(1, 2) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index e0213efd507fd46d6d7d8800b6d02c89229a13af..992a7fe2c9c6218c7d132571a2f0cab0ee6618a4 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -66,7 +66,7 @@ pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version.
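The `current` → `in_code` rename in these staking migrations makes the two sides of the comparison explicit: the in-code version is a constant compiled into the runtime, while the on-chain version is read from storage and lags behind until a migration bumps it. A minimal sketch of the idiom, assuming the usual pallet context:

```rust
// True whenever a migration still has to run for this pallet: the version
// declared in code has moved ahead of the version recorded on chain.
fn needs_migration<T: Config>() -> bool {
    Pallet::<T>::in_code_storage_version() != Pallet::<T>::on_chain_storage_version()
}
```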
const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); #[pallet::pallet] diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 5f86fb1595a1a534d5387100e8cc6352ebd096f6..e837956613edd2a30146bc6e41b4518d55c95095 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 73abc1a6aed0343b104962f2c07536e2d2c259a7..be60068f1220b701e15f440b11b9775a5bcbaaf1 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -17,14 +17,25 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", default-features = false } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -sp-api = { path = "../../primitives/api", default-features = false, features = ["frame-metadata"] } +serde = { features = ["alloc", "derive"], workspace = true } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", + "max-encoded-len", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } +frame-metadata = { version = "16.0.0", default-features = false, features = [ + "current", +] } +sp-api = { path = "../../primitives/api", default-features = false, features = [ + "frame-metadata", +] } sp-std = { path = "../../primitives/std", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } +sp-runtime = { path = "../../primitives/runtime", default-features = false, features = [ + "serde", +] } sp-tracing = { path = "../../primitives/tracing", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } @@ -46,7 +57,7 @@ sp-crypto-hashing-proc-macro = { path = "../../primitives/crypto/hashing/proc-ma k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } -serde_json = { version = "1.0.113", default-features = false, features = ["alloc"] } +serde_json = { features = ["alloc"], workspace = true } docify = "0.2.7" static_assertions = "1.1.0" @@ -55,6 +66,7 @@ aquamarine = { version = "0.5.0" } [dev-dependencies] 
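Stepping back to the `STORAGE_VERSION` constant at the start of this chunk: it becomes the pallet's in-code version by being attached to the pallet struct, which is exactly what `in_code_storage_version()` reports. A sketch of the declaration inside a `#[frame_support::pallet]` module:

```rust
/// Declared next to the pallet and attached to the pallet struct; migrations
/// compare this value against the on-chain version before acting.
const STORAGE_VERSION: StorageVersion = StorageVersion::new(14);

#[pallet::pallet]
#[pallet::storage_version(STORAGE_VERSION)]
pub struct Pallet<T>(_);
```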
assert_matches = "1.3.0" pretty_assertions = "1.2.1" +sp-timestamp = { path = "../../primitives/timestamp", default-features = false } frame-system = { path = "../system" } sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } @@ -83,6 +95,7 @@ std = [ "sp-staking/std", "sp-state-machine/std", "sp-std/std", + "sp-timestamp/std", "sp-tracing/std", "sp-weights/std", ] @@ -96,7 +109,9 @@ try-runtime = [ "sp-debug-derive/force-debug", "sp-runtime/try-runtime", ] -experimental = [] +experimental = [ + "frame-support-procedural/experimental", +] # By default some types have documentation, `no-metadata-docs` allows to reduce the documentation # in the metadata. no-metadata-docs = [ diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index f3434bd24916f30d484374c7730f12bac8d64bbf..dd0688f2ad06a94332bda9f5401a5578aae04e28 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -23,8 +23,8 @@ Inflector = "0.11.4" cfg-expr = "0.15.5" itertools = "0.10.3" proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["full", "visit-mut"] } +quote = { workspace = true } +syn = { features = ["full", "visit-mut"], workspace = true } frame-support-procedural-tools = { path = "tools" } macro_magic = { version = "0.5.0", features = ["proc_support"] } proc-macro-warning = { version = "1.0.0", default-features = false } @@ -38,6 +38,7 @@ regex = "1" default = ["std"] std = ["sp-crypto-hashing/std"] no-metadata-docs = [] +experimental = [] # Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of # pallets in a runtime grows. Does increase the compile time! tuples-96 = [] diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index 6ded82d91aa5cd9bb9cceb4594ae9768d4948483..27c75a7f054cfbbcd71750f11985665b60cc3aed 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -65,6 +65,7 @@ struct RangeArgs { start: syn::GenericArgument, _comma: Comma, end: syn::GenericArgument, + _trailing_comma: Option<Comma>, _gt_token: Gt, } @@ -431,7 +432,7 @@ pub fn benchmarks( let mut benchmarks_by_name_mappings: Vec<TokenStream2> = Vec::new(); let test_idents: Vec<Ident> = benchmark_names_str .iter() - .map(|n| Ident::new(format!("test_{}", n).as_str(), Span::call_site())) + .map(|n| Ident::new(format!("test_benchmark_{}", n).as_str(), Span::call_site())) .collect(); for i in 0..benchmark_names.len() { let name_ident = &benchmark_names[i]; @@ -441,6 +442,37 @@ pub fn benchmarks( benchmarks_by_name_mappings.push(quote!(#name_str => Self::#test_ident())) } + let impl_test_function = content + .iter_mut() + .find_map(|item| { + let Item::Macro(item_macro) = item else { + return None; + }; + + if !item_macro + .mac + .path + .segments + .iter() + .any(|s| s.ident == "impl_benchmark_test_suite") + { + return None; + } + + let tokens = item_macro.mac.tokens.clone(); + *item = Item::Verbatim(quote! {}); + + Some(quote! { + impl_test_function!( + (#( {} #benchmark_names )*) + (#( #extra_benchmark_names )*) + (#( #skip_meta_benchmark_names )*) + #tokens + ); + }) + }) + .unwrap_or(quote! {}); + // emit final quoted tokens let res = quote!
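Two user-visible effects fall out of this benchmarking hunk: generated test functions are now named `test_benchmark_<name>` (so they no longer collide with hand-written `test_*` functions), and an `impl_benchmark_test_suite!` invocation found inside a `#[benchmarks]` module is lifted out and expanded through `impl_test_function!`. A hedged sketch of the user side, assuming a pallet with a `do_something(origin, x: u32)` call and the usual `mock` module:

```rust
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benches {
    use super::*;

    #[benchmark]
    fn do_something() {
        // setup code would run here
        #[extrinsic_call]
        _(RawOrigin::Signed(whitelisted_caller()), 42u32);
        // verification code would run here
    }

    // Now detected inside the module by the macro itself; among other things
    // this generates a test named `test_benchmark_do_something`.
    impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}
```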
{ #(#mod_attrs) @@ -676,6 +708,8 @@ pub fn benchmarks( } } } + + #impl_test_function } #mod_vis use #mod_name::*; }; @@ -733,7 +767,8 @@ fn expand_benchmark( let setup_stmts = benchmark_def.setup_stmts; let verify_stmts = benchmark_def.verify_stmts; let last_stmt = benchmark_def.last_stmt; - let test_ident = Ident::new(format!("test_{}", name.to_string()).as_str(), Span::call_site()); + let test_ident = + Ident::new(format!("test_benchmark_{}", name.to_string()).as_str(), Span::call_site()); // unroll params (prepare for quoting) let unrolled = UnrolledParams::from(&benchmark_def.params); diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs index ffe55bceb80ef96bc9e7814cfbd8bec6c62ab562..dbbe6ba6e6c32ec09a8514033108617883075243 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -76,10 +76,6 @@ pub fn expand_outer_config( #fields } - #[cfg(any(feature = "std", test))] - #[deprecated(note = "GenesisConfig is planned to be removed in December 2023. Use `RuntimeGenesisConfig` instead.")] - pub type GenesisConfig = RuntimeGenesisConfig; - #[cfg(any(feature = "std", test))] impl #scrate::sp_runtime::BuildStorage for RuntimeGenesisConfig { fn assimilate_storage( @@ -99,6 +95,17 @@ pub fn expand_outer_config( ::on_genesis(); } } + + /// Test the `Default` derive impl of the `RuntimeGenesisConfig`. + #[cfg(test)] + #[test] + fn test_genesis_config_builds() { + #scrate::__private::sp_io::TestExternalities::default().execute_with(|| { + ::build( + &RuntimeGenesisConfig::default() + ); + }); + } } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index 34b9d21d8ce84fafb7958278227a3f572d8d2f74..da483fa6cf0b6aaa724792f724482e88027fa696 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -204,47 +204,50 @@ pub fn expand_outer_inherent( } } + impl #scrate::traits::IsInherent<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> for #runtime { + fn is_inherent(ext: &<#block as #scrate::sp_runtime::traits::Block>::Extrinsic) -> bool { + use #scrate::inherent::ProvideInherent; + use #scrate::traits::{IsSubType, ExtrinsicCall}; + + if #scrate::sp_runtime::traits::Extrinsic::is_signed(ext).unwrap_or(false) { + // Signed extrinsics are never inherents. 
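The injected genesis test above is worth spelling out: `construct_runtime!` now emits a unit test that builds the `Default` value of `RuntimeGenesisConfig` inside fresh externalities, so a runtime whose default genesis cannot build fails its own `cargo test`. A de-macroed sketch of what it expands to (the `BuildGenesisConfig` trait path is an assumption; the quoted `#scrate` fragments abbreviate it):

```rust
#[cfg(test)]
#[test]
fn test_genesis_config_builds() {
    sp_io::TestExternalities::default().execute_with(|| {
        // Assumed trait path; the macro expansion references it via #scrate.
        <RuntimeGenesisConfig as frame_support::traits::BuildGenesisConfig>::build(
            &RuntimeGenesisConfig::default(),
        );
    });
}
```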
+ return false + } + + #( + #pallet_attrs + { + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(ext); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if <#pallet_names as ProvideInherent>::is_inherent(&call) { + return true; + } + } + } + )* + false + } + } + impl #scrate::traits::EnsureInherentsAreFirst<#block> for #runtime { - fn ensure_inherents_are_first(block: &#block) -> Result<(), u32> { + fn ensure_inherents_are_first(block: &#block) -> Result<u32, u32> { use #scrate::inherent::ProvideInherent; use #scrate::traits::{IsSubType, ExtrinsicCall}; use #scrate::sp_runtime::traits::Block as _; - let mut first_signed_observed = false; + let mut num_inherents = 0u32; for (i, xt) in block.extrinsics().iter().enumerate() { - let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) - .unwrap_or(false); - - let is_inherent = if is_signed { - // Signed extrinsics are not inherents. - false - } else { - let mut is_inherent = false; - #( - #pallet_attrs - { - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(&call) { - is_inherent = true; - } - } - } - )* - is_inherent - }; - - if !is_inherent { - first_signed_observed = true; - } + if <Self as #scrate::traits::IsInherent<_>>::is_inherent(xt) { + if num_inherents != i as u32 { + return Err(i as u32); + } - if first_signed_observed && is_inherent { - return Err(i as u32) + num_inherents += 1; // Safe since we are in an `enumerate` loop. } } - Ok(()) + Ok(num_inherents) } } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index b421d2aaffabe077450d81e3396976f3067b264a..83049919d01c34f05ce7be6725ab8bc60731f47b 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -104,7 +104,7 @@ pub fn expand_outer_origin( #[doc = #doc_string] #[derive(Clone)] pub struct RuntimeOrigin { - caller: OriginCaller, + pub caller: OriginCaller, filter: #scrate::__private::sp_std::rc::Rc::RuntimeCall) -> bool>>, } diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 54ed15f7b1d36d6ee5eb724381152af03bef60c8..1937dfa9ca4d0e966aa1c46e1b240fe53dc2ab54 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -208,8 +208,8 @@ //! This macro returns the ` :: expanded { Error }` list of additional parts we would like to //! expose.
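The rewritten check enforces one invariant: inherents must form a contiguous prefix of the block, and the caller now also learns how many there are. The same algorithm over a plain `bool` slice, runnable in isolation:

```rust
/// Returns Ok(number_of_inherents) if every inherent precedes the first
/// non-inherent extrinsic, or Err(index) pointing at the first offender.
fn ensure_inherents_are_first(is_inherent: &[bool]) -> Result<u32, u32> {
    let mut num_inherents = 0u32;
    for (i, inherent) in is_inherent.iter().copied().enumerate() {
        if inherent {
            if num_inherents != i as u32 {
                // An inherent appeared after a non-inherent: report its index.
                return Err(i as u32);
            }
            num_inherents += 1;
        }
    }
    Ok(num_inherents)
}

#[test]
fn inherents_must_be_a_prefix() {
    assert_eq!(ensure_inherents_are_first(&[true, true, false]), Ok(2));
    assert_eq!(ensure_inherents_are_first(&[true, false, true]), Err(2));
    assert_eq!(ensure_inherents_are_first(&[false, false]), Ok(0));
}
```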
-mod expand; -mod parse; +pub(crate) mod expand; +pub(crate) mod parse; use crate::pallet::parse::helper::two128_str; use cfg_expr::Predicate; @@ -233,25 +233,38 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let input_copy = input.clone(); let definition = syn::parse_macro_input!(input as RuntimeDeclaration); - let res = match definition { - RuntimeDeclaration::Implicit(implicit_def) => - check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()).and_then( - |_| construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), - ), - RuntimeDeclaration::Explicit(explicit_decl) => check_pallet_number( - input_copy.clone().into(), - explicit_decl.pallets.len(), - ) - .and_then(|_| { - construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl) - }), - RuntimeDeclaration::ExplicitExpanded(explicit_decl) => - check_pallet_number(input_copy.into(), explicit_decl.pallets.len()) - .and_then(|_| construct_runtime_final_expansion(explicit_decl)), + let (check_pallet_number_res, res) = match definition { + RuntimeDeclaration::Implicit(implicit_def) => ( + check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()), + construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), + ), + RuntimeDeclaration::Explicit(explicit_decl) => ( + check_pallet_number(input_copy.clone().into(), explicit_decl.pallets.len()), + construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl), + ), + RuntimeDeclaration::ExplicitExpanded(explicit_decl) => ( + check_pallet_number(input_copy.into(), explicit_decl.pallets.len()), + construct_runtime_final_expansion(explicit_decl), + ), }; let res = res.unwrap_or_else(|e| e.to_compile_error()); + // We want to provide better error messages to the user and thus handle the error here + // separately. If there is an error, we print it and still generate all of the code, so that + // the user sees fewer errors overall. + let res = if let Err(error) = check_pallet_number_res { + let error = error.to_compile_error(); + + quote!
{ + #error + + #res + } + } else { + res + }; + let res = expander::Expander::new("construct_runtime") .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) @@ -502,7 +515,7 @@ fn construct_runtime_final_expansion( Ok(res) } -fn decl_all_pallets<'a>( +pub(crate) fn decl_all_pallets<'a>( runtime: &'a Ident, pallet_declarations: impl Iterator<Item = &'a Pallet>, features: &HashSet<&str>, @@ -611,7 +624,8 @@ fn decl_all_pallets<'a>( #( #all_pallets_without_system )* ) } -fn decl_pallet_runtime_setup( + +pub(crate) fn decl_pallet_runtime_setup( runtime: &Ident, pallet_declarations: &[Pallet], scrate: &TokenStream2, @@ -717,7 +731,7 @@ fn decl_pallet_runtime_setup( ) } -fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { +pub(crate) fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { quote!( #[cfg(test)] mod __construct_runtime_integrity_test { @@ -732,7 +746,7 @@ fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { ) } -fn decl_static_assertions( +pub(crate) fn decl_static_assertions( runtime: &Ident, pallet_decls: &[Pallet], scrate: &TokenStream2, @@ -763,7 +777,7 @@ fn decl_static_assertions( } } -fn check_pallet_number(input: TokenStream2, pallet_num: usize) -> Result<()> { +pub(crate) fn check_pallet_number(input: TokenStream2, pallet_num: usize) -> Result<()> { let max_pallet_num = { if cfg!(feature = "tuples-96") { 96 diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index d6d5bf68efd5689af2e96250e24cef3cd7faf47b..8740ccd401abedcbcb27e19c77ef3e885207342a 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -172,6 +172,33 @@ fn combine_impls( final_impl } +/// Computes the disambiguation path for the `derive_impl` attribute macro. +/// +/// When specified explicitly using `as [disambiguation_path]` in the macro attr, the +/// disambiguation path is used as is. If not, we infer the disambiguation path from the +/// `foreign_impl_path` and the computed scope. +fn compute_disambiguation_path( + disambiguation_path: Option<Path>, + foreign_impl: ItemImpl, + default_impl_path: Path, +) -> Result<Path> { + match (disambiguation_path, foreign_impl.clone().trait_) { + (Some(disambiguation_path), _) => Ok(disambiguation_path), + (None, Some((_, foreign_impl_path, _))) => + if default_impl_path.segments.len() > 1 { + let scope = default_impl_path.segments.first(); + Ok(parse_quote!(#scope :: #foreign_impl_path)) + } else { + Ok(foreign_impl_path) + }, + _ => Err(syn::Error::new( + default_impl_path.span(), + "Impl statement must have a defined type being implemented \ + for a defined type such as `impl A for B`", + )), + } +} + /// Internal implementation behind [`#[derive_impl(..)]`](`macro@crate::derive_impl`).
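The restructuring around `check_pallet_number_res` is a general proc-macro error-recovery pattern: emit the `compile_error!` alongside the still-generated code instead of aborting, so the user sees one diagnostic rather than dozens of follow-ups about items that were never generated. The pattern in isolation:

```rust
use proc_macro2::TokenStream;
use quote::quote;

// If the check failed, prepend the error to the generated output; the output
// still defines the expected items, so no "cannot find type" noise follows.
fn emit_with_recovery(check: syn::Result<()>, generated: TokenStream) -> TokenStream {
    match check {
        Ok(()) => generated,
        Err(e) => {
            let error = e.to_compile_error();
            quote! {
                #error
                #generated
            }
        },
    }
}
```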
/// /// `default_impl_path`: the module path of the external `impl` statement whose tokens we are @@ -194,18 +221,11 @@ pub fn derive_impl( let foreign_impl = parse2::<ItemImpl>(foreign_tokens)?; let default_impl_path = parse2::<Path>(default_impl_path)?; - // have disambiguation_path default to the item being impl'd in the foreign impl if we - // don't specify an `as [disambiguation_path]` in the macro attr - let disambiguation_path = match (disambiguation_path, foreign_impl.clone().trait_) { - (Some(disambiguation_path), _) => disambiguation_path, - (None, Some((_, foreign_impl_path, _))) => foreign_impl_path, - _ => - return Err(syn::Error::new( - foreign_impl.span(), - "Impl statement must have a defined type being implemented \ - for a defined type such as `impl A for B`", - )), - }; + let disambiguation_path = compute_disambiguation_path( + disambiguation_path, + foreign_impl.clone(), + default_impl_path.clone(), + )?; // generate the combined impl let combined_impl = combine_impls( @@ -257,3 +277,27 @@ fn test_runtime_type_with_doc() { } } } + +#[test] +fn test_disambiguation_path() { + let foreign_impl: ItemImpl = parse_quote!(impl SomeTrait for SomeType {}); + let default_impl_path: Path = parse_quote!(SomeScope::SomeType); + + // disambiguation path is specified + let disambiguation_path = compute_disambiguation_path( + Some(parse_quote!(SomeScope::SomePath)), + foreign_impl.clone(), + default_impl_path.clone(), + ); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeScope::SomePath)); + + // disambiguation path is not specified and the default_impl_path has more than one segment + let disambiguation_path = + compute_disambiguation_path(None, foreign_impl.clone(), default_impl_path.clone()); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeScope::SomeTrait)); + + // disambiguation path is not specified and the default_impl_path has only one segment + let disambiguation_path = + compute_disambiguation_path(None, foreign_impl.clone(), parse_quote!(SomeType)); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeTrait)); +} diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 20b8d74310f3e4aca7a5cae61fe3e5d72ec0626c..dee6d522d25ce90fe7ab254147fd31dc46ed3f3d 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -31,6 +31,7 @@ mod match_and_insert; mod no_bound; mod pallet; mod pallet_error; +mod runtime; mod storage_alias; mod transactional; mod tt_macro; @@ -188,133 +189,11 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } -/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify -/// pallet information. /// -/// The struct must be defined as follows: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet<T>(_); -/// ``` -/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. -/// -/// ## Macro expansion: -/// -/// The macro adds this attribute to the struct definition: -/// ```ignore -/// #[derive( -/// frame_support::CloneNoBound, -/// frame_support::EqNoBound, -/// frame_support::PartialEqNoBound, -/// frame_support::RuntimeDebugNoBound, -/// )] -/// ``` -/// and replaces the type `_` with `PhantomData<T>`. It also implements on the pallet: -/// * `GetStorageVersion` -/// * `OnGenesis`: contains some logic to write the pallet version into storage.
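Concretely, the inference added by `compute_disambiguation_path` means the common FRAME pattern no longer needs an `as ...` clause. A hedged sketch (the mock `Test` and `MockBlock` types are assumed): with `frame_system::config_preludes::TestDefaultConfig` as the default impl path, its first segment `frame_system` is combined with the implemented trait `DefaultConfig`, so `frame_system::DefaultConfig` is inferred:

```rust
// Before: #[derive_impl(frame_system::config_preludes::TestDefaultConfig as
//         frame_system::DefaultConfig)]
// After this change the disambiguation path is inferred:
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    type Block = MockBlock<Test>;
    // Every item not listed here falls back to TestDefaultConfig's defaults.
}
```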
-/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. -/// -/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. -/// -/// It implements `PalletInfoAccess` on `Pallet` to ease access to pallet information given by -/// `frame_support::traits::PalletInfo`. (The implementation uses the associated type -/// `frame_system::Config::PalletInfo`). -/// -/// It implements `StorageInfoTrait` on `Pallet` which give information about all storages. -/// -/// If the attribute `generate_store` is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. -/// -/// If the attribute `set_storage_max_encoded_len` is set then the macro calls -/// `StorageInfoTrait` for each storage in the implementation of `StorageInfoTrait` for the -/// pallet. Otherwise it implements `StorageInfoTrait` for the pallet using the -/// `PartialStorageInfoTrait` implementation of storages. -/// -/// ## Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The aim -/// of dev mode is to loosen some of the restrictions and requirements placed on production -/// pallets for easy tinkering and development. Dev mode pallets should not be used in -/// production. Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By default, dev -/// mode pallets will assume a weight of zero (`0`) if a weight is not specified. This is -/// equivalent to specifying `#[weight(0)]` on all calls that do not specify a weight. -/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a call index based on the order of the call. -/// * All storages are marked as unbounded, meaning you do not need to implement `MaxEncodedLen` on -/// storage types. This is equivalent to specifying `#[pallet::unbounded]` on all storage type -/// definitions. -/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, these -/// will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` can simply -/// be ignored when in `dev_mode`. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument -/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]` -/// attribute macro. -/// -///
-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
-/// and therefore should never be done. Once you are done tinkering, you should remove the
-/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
-/// attempting to use your pallet in a production scenario.
-/// 
-/// -/// See `frame_support::pallet` docs for more info. -/// -/// ## Runtime Metadata Documentation -/// -/// The documentation added to this pallet is included in the runtime metadata. -/// -/// The documentation can be defined in the following ways: -/// -/// ```ignore -/// #[pallet::pallet] -/// /// Documentation for pallet 1 -/// #[doc = "Documentation for pallet 2"] -/// #[doc = include_str!("../README.md")] -/// #[pallet_doc("../doc1.md")] -/// #[pallet_doc("../doc2.md")] -/// pub mod pallet {} -/// ``` -/// -/// The runtime metadata for this pallet contains the following -/// - " Documentation for pallet 1" (captured from `///`) -/// - "Documentation for pallet 2" (captured from `#[doc]`) -/// - content of ../README.md (captured from `#[doc]` with `include_str!`) -/// - content of "../doc1.md" (captured from `pallet_doc`) -/// - content of "../doc2.md" (captured from `pallet_doc`) -/// -/// ### `doc` attribute -/// -/// The value of the `doc` attribute is included in the runtime metadata, as well as -/// expanded on the pallet module. The previous example is expanded to: -/// -/// ```ignore -/// /// Documentation for pallet 1 -/// /// Documentation for pallet 2 -/// /// Content of README.md -/// pub mod pallet {} -/// ``` -/// -/// If you want to specify the file from which the documentation is loaded, you can use the -/// `include_str` macro. However, if you only want the documentation to be included in the -/// runtime metadata, use the `pallet_doc` attribute. -/// -/// ### `pallet_doc` attribute -/// -/// Unlike the `doc` attribute, the documentation provided to the `pallet_doc` attribute is -/// not inserted on the module. -/// -/// The `pallet_doc` attribute can only be provided with one argument, -/// which is the file path that holds the documentation to be added to the metadata. +/// --- /// -/// This approach is beneficial when you use the `include_str` macro at the beginning of the file -/// and want that documentation to extend to the runtime metadata, without reiterating the -/// documentation on the pallet module itself. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet`. #[proc_macro_attribute] pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { pallet::pallet(attr, item) @@ -408,19 +287,28 @@ pub fn transactional(attr: TokenStream, input: TokenStream) -> TokenStream { transactional::transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into()) } +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::require_transactional`. #[proc_macro_attribute] pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStream { transactional::require_transactional(attr, input) .unwrap_or_else(|e| e.to_compile_error().into()) } -/// Derive [`Clone`] but do not bound any generic. Docs are at `frame_support::CloneNoBound`. +/// Derive [`Clone`] but do not bound any generic. +/// +/// Docs at `frame_support::CloneNoBound`. #[proc_macro_derive(CloneNoBound)] pub fn derive_clone_no_bound(input: TokenStream) -> TokenStream { no_bound::clone::derive_clone_no_bound(input) } -/// Derive [`Debug`] but do not bound any generics. Docs are at `frame_support::DebugNoBound`. +/// Derive [`Debug`] but do not bound any generics. +/// +/// Docs at `frame_support::DebugNoBound`. 
#[proc_macro_derive(DebugNoBound)] pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { no_bound::debug::derive_debug_no_bound(input) @@ -452,14 +340,17 @@ pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { } } -/// Derive [`PartialEq`] but do not bound any generic. Docs are at -/// `frame_support::PartialEqNoBound`. +/// Derive [`PartialEq`] but do not bound any generic. +/// +/// Docs at `frame_support::PartialEqNoBound`. #[proc_macro_derive(PartialEqNoBound)] pub fn derive_partial_eq_no_bound(input: TokenStream) -> TokenStream { no_bound::partial_eq::derive_partial_eq_no_bound(input) } -/// Derive [`Eq`] but do no bound any generic. Docs are at `frame_support::EqNoBound`. +/// Derive [`Eq`] but do not bound any generic. +/// +/// Docs at `frame_support::EqNoBound`. #[proc_macro_derive(EqNoBound)] pub fn derive_eq_no_bound(input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input as syn::DeriveInput); @@ -494,6 +385,7 @@ pub fn derive_default_no_bound(input: TokenStream) -> TokenStream { no_bound::default::derive_default_no_bound(input) } +/// Macro used internally in FRAME to generate the crate version for a pallet. #[proc_macro] pub fn crate_to_crate_version(input: TokenStream) -> TokenStream { crate_version::crate_to_crate_version(input) @@ -555,6 +447,11 @@ pub fn __create_tt_macro(input: TokenStream) -> TokenStream { tt_macro::create_tt_return_macro(input) } +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_alias`. #[proc_macro_attribute] pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream { storage_alias::storage_alias(attributes.into(), input.into()) @@ -603,6 +500,11 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream /// default to `A` from the `impl A for B` part of the default impl. This is useful for scenarios /// where all of the relevant types are already in scope via `use` statements. /// +/// In case the `default_impl_path` is scoped to a different module such as +/// `some::path::TestTraitImpl`, the same scope is assumed for the `disambiguation_path`, i.e. +/// `some::A`. This enables the use of the `derive_impl` attribute without having to specify the +/// `disambiguation_path` in most (if not all) uses within FRAME's context. +/// /// Conversely, the `default_impl_path` argument is required and cannot be omitted. /// /// Optionally, `no_aggregated_types` can be specified as follows: @@ -785,24 +687,21 @@ pub fn derive_impl(attrs: TokenStream, input: TokenStream) -> TokenStream { .into() } -/// The optional attribute `#[pallet::no_default]` can be attached to trait items within a -/// `Config` trait impl that has [`#[pallet::config(with_default)]`](`macro@config`) attached. /// -/// Attaching this attribute to a trait item ensures that that trait item will not be used as a -/// default with the [`#[derive_impl(..)]`](`macro@derive_impl`) attribute macro. +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::no_default`. #[proc_macro_attribute] pub fn no_default(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::no_default_bounds]` can be attached to trait items within a -/// `Config` trait impl that has [`#[pallet::config(with_default)]`](`macro@config`) attached.
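Since the `*NoBound` derive docs now live in `frame_support`, a quick reminder of why they exist: the std derives add a `T: Trait` bound on every generic parameter, even when the fields only use associated types that already satisfy the trait. A minimal sketch:

```rust
use frame_support::{CloneNoBound, DebugNoBound, EqNoBound, PartialEqNoBound};

// A plain #[derive(Clone)] would demand T: Clone, although only T::AccountId
// is stored, and frame_system already guarantees that it is Clone.
#[derive(CloneNoBound, DebugNoBound, PartialEqNoBound, EqNoBound)]
pub struct Voucher<T: frame_system::Config> {
    who: T::AccountId,
}
```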
/// -/// Attaching this attribute to a trait item ensures that the generated trait `DefaultConfig` -/// will not have any bounds for this trait item. +/// --- /// -/// As an example, if you have a trait item `type AccountId: SomeTrait;` in your `Config` trait, -/// the generated `DefaultConfig` will only have `type AccountId;` with no trait bound. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::no_default_bounds`. #[proc_macro_attribute] pub fn no_default_bounds(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -882,6 +781,11 @@ pub fn register_default_impl(attrs: TokenStream, tokens: TokenStream) -> TokenSt } } +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_prelude::inject_runtime_type`. #[proc_macro_attribute] pub fn inject_runtime_type(_: TokenStream, tokens: TokenStream) -> TokenStream { let item = tokens.clone(); @@ -915,75 +819,11 @@ fn pallet_macro_stub() -> TokenStream { .into() } -/// The mandatory attribute `#[pallet::config]` defines the configurable options for the pallet. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits -/// $optional_where_clause -/// { -/// ... -/// } -/// ``` -/// -/// I.e. a regular trait definition named `Config`, with the supertrait -/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. -/// (Specifying other supertraits here is known as [tight -/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) -/// -/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds -/// `From` and `IsType<::RuntimeEvent>`. -/// -/// [`pallet::event`](`macro@event`) must be present if `RuntimeEvent` exists as a config item -/// in your `#[pallet::config]`. -/// -/// ## Optional: `with_default` /// -/// An optional `with_default` argument may also be specified. Doing so will automatically -/// generate a `DefaultConfig` trait inside your pallet which is suitable for use with -/// [`[#[derive_impl(..)]`](`macro@derive_impl`) to derive a default testing config: -/// -/// ```ignore -/// #[pallet::config(with_default)] -/// pub trait Config: frame_system::Config { -/// type RuntimeEvent: Parameter -/// + Member -/// + From> -/// + Debug -/// + IsType<::RuntimeEvent>; -/// -/// #[pallet::no_default] -/// type BaseCallFilter: Contains; -/// // ... -/// } -/// ``` -/// -/// As shown above, you may also attach the [`#[pallet::no_default]`](`macro@no_default`) -/// attribute to specify that a particular trait item _cannot_ be used as a default when a test -/// `Config` is derived using the [`#[derive_impl(..)]`](`macro@derive_impl`) attribute macro. -/// This will cause that particular trait item to simply not appear in default testing configs -/// based on this config (the trait item will not be included in `DefaultConfig`). -/// -/// ### `DefaultConfig` Caveats -/// -/// The auto-generated `DefaultConfig` trait: -/// - is always a _subset_ of your pallet's `Config` trait. -/// - can only contain items that don't rely on externalities, such as `frame_system::Config`. 
-/// -/// Trait items that _do_ rely on externalities should be marked with -/// [`#[pallet::no_default]`](`macro@no_default`) -/// -/// Consequently: -/// - Any items that rely on externalities _must_ be marked with -/// [`#[pallet::no_default]`](`macro@no_default`) or your trait will fail to compile when used -/// with [`derive_impl`](`macro@derive_impl`). -/// - Items marked with [`#[pallet::no_default]`](`macro@no_default`) are entirely excluded from the -/// `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to implement -/// such items. +/// --- /// -/// For more information, see [`macro@derive_impl`]. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::config`. #[proc_macro_attribute] pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -992,7 +832,7 @@ pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::constant`. #[proc_macro_attribute] pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { @@ -1002,106 +842,48 @@ pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::constant_name`. #[proc_macro_attribute] pub fn constant_name(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `pallet::disable_frame_system_supertrait_check`, e.g.: /// -/// ```ignore -/// #[pallet::config] -/// #[pallet::disable_frame_system_supertrait_check] -/// pub trait Config: pallet_timestamp::Config {} -/// ``` +/// --- /// -/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you -/// want to write an alternative to the `frame_system` pallet. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::disable_frame_system_supertrait_check`. #[proc_macro_attribute] pub fn disable_frame_system_supertrait_check(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with -/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: /// -/// ```ignore -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// ``` -/// More precisely, the `Store` trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing access to the storage from pallet struct. -/// -/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using -/// `::Foo`. -/// -/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct -/// definition. -#[proc_macro_attribute] -pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - -/// Because the `pallet::pallet` macro implements `GetStorageVersion`, the current storage -/// version needs to be communicated to the macro. 
This can be done by using the -/// `pallet::storage_version` attribute: -/// -/// ```ignore -/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); -/// -/// #[pallet::pallet] -/// #[pallet::storage_version(STORAGE_VERSION)] -/// pub struct Pallet(_); -/// ``` +/// --- /// -/// If not present, the current storage version is set to the default value. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_version`. #[proc_macro_attribute] pub fn storage_version(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::hooks]` attribute allows you to specify a `Hooks` implementation for -/// `Pallet` that specifies pallet-specific logic. -/// -/// The item the attribute attaches to must be defined as follows: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet $optional_where_clause { -/// ... -/// } -/// ``` -/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` and -/// with an optional where clause. -/// -/// If no `#[pallet::hooks]` exists, then the following default implementation is -/// automatically generated: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet {} -/// ``` -/// -/// ## Macro expansion -/// -/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, -/// `OffchainWorker`, and `IntegrityTest` using the provided `Hooks` implementation. /// -/// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some -/// additional logic. E.g. logic to write the pallet version into storage. +/// --- /// -/// NOTE: The macro also adds some tracing logic when implementing the above traits. The -/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::hooks`. #[proc_macro_attribute] pub fn hooks(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the -/// first argument must be `origin: OriginFor`. +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::weight`. #[proc_macro_attribute] pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1110,8 +892,8 @@ pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::compact`. #[proc_macro_attribute] pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1120,8 +902,8 @@ pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::call`. 
#[proc_macro_attribute] pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1132,164 +914,60 @@ pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::call_index`. #[proc_macro_attribute] pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` attribute, -/// which explicitly defines the condition for the dispatchable to be feeless. /// -/// The arguments for the closure must be the referenced arguments of the dispatchable function. -/// -/// The closure must return `bool`. -/// -/// ### Example -/// ```ignore -/// #[pallet::feeless_if(|_origin: &OriginFor, something: &u32| -> bool { -/// *something == 0 -/// })] -/// pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { -/// .... -/// } -/// ``` -/// -/// Please note that this only works for signed dispatchables and requires a signed extension -/// such as `SkipCheckIfFeeless` as defined in `pallet-skip-feeless-payment` to wrap the existing -/// payment extension. Else, this is completely ignored and the dispatchable is still charged. +/// --- /// -/// ### Macro expansion +/// Rust-Analyzer Users: Documentation for this macro can be found at /// -/// The macro implements the `CheckIfFeeless` trait on the dispatchable and calls the corresponding -/// closure in the implementation. +/// `frame_support::pallet_macros::feeless_if`. #[proc_macro_attribute] pub fn feeless_if(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Allows you to define some extra constants to be added into constant metadata. -/// -/// Item must be defined as: /// -/// ```ignore -/// #[pallet::extra_constants] -/// impl Pallet where $optional_where_clause { -/// /// $some_doc -/// $vis fn $fn_name() -> $some_return_type { -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, -/// 0 generics, and some return type. +/// --- /// -/// ## Macro expansion +/// Rust-Analyzer Users: Documentation for this macro can be found at /// -/// The macro add some extra constants to pallet constant metadata. +/// `frame_support::pallet_macros::extra_constants`. #[proc_macro_attribute] pub fn extra_constants(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned -/// from the dispatchable when an error occurs. The information for this error type is then -/// stored in metadata. -/// -/// Item must be defined as: /// -/// ```ignore -/// #[pallet::error] -/// pub enum Error { -/// /// $some_optional_doc -/// $SomeFieldLessVariant, -/// /// $some_more_optional_doc -/// $SomeVariantWithOneField(FieldType), -/// ... -/// } -/// ``` -/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field -/// variants. -/// -/// Any field type in the enum variants must implement `TypeInfo` in order to be properly used -/// in the metadata, and its encoded size should be as small as possible, preferably 1 byte in -/// size in order to reduce storage size. 
The error enum itself has an absolute maximum encoded -/// size specified by `MAX_MODULE_ERROR_ENCODED_SIZE`. -/// -/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to -/// diagnose problems and give a better experience to the user. Don't skimp on having lots of -/// individual error conditions.) -/// -/// Field types in enum variants must also implement `PalletError`, otherwise the pallet will -/// fail to compile. Rust primitive types have already implemented the `PalletError` trait -/// along with some commonly used stdlib types such as [`Option`] and `PhantomData`, and hence -/// in most use cases, a manual implementation is not necessary and is discouraged. -/// -/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, -/// bounds and/or a where clause should not needed for any use-case. -/// -/// ## Macro expansion -/// -/// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, and -/// `as_str` using variant doc. +/// --- /// -/// The macro also implements `From>` for `&'static str` and `From>` for -/// `DispatchError`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::error`. #[proc_macro_attribute] pub fn error(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::event]` attribute allows you to define pallet events. Pallet events are -/// stored under the `system` / `events` key when the block is applied (and then replaced when -/// the next block writes it's events). -/// -/// The Event enum must be defined as follows: -/// -/// ```ignore -/// #[pallet::event] -/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional -/// pub enum Event<$some_generic> $optional_where_clause { -/// /// Some doc -/// $SomeName($SomeType, $YetanotherType, ...), -/// ... -/// } -/// ``` /// -/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or -/// `T` or `T: Config`, and optional w here clause. +/// --- /// -/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], `Encode`, `Decode`, and -/// [`Debug`] (on std only). For ease of use, bound by the trait `Member`, available in -/// `frame_support::pallet_prelude`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::event`. #[proc_macro_attribute] pub fn event(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a -/// helper function on `Pallet` that handles deposit events. -/// -/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. -/// -/// ## Macro expansion /// -/// The macro will add on enum `Event` the attributes: -/// * `#[derive(frame_support::CloneNoBound)]` -/// * `#[derive(frame_support::EqNoBound)]` -/// * `#[derive(frame_support::PartialEqNoBound)]` -/// * `#[derive(frame_support::RuntimeDebugNoBound)]` -/// * `#[derive(codec::Encode)]` -/// * `#[derive(codec::Decode)]` -/// -/// The macro implements `From>` for (). -/// -/// The macro implements a metadata function on `Event` returning the `EventMetadata`. +/// --- /// -/// If `#[pallet::generate_deposit]` is present then the macro implements `fn deposit_event` on -/// `Pallet`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::generate_deposit`. 
#[proc_macro_attribute] pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1298,105 +976,68 @@ pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::storage`. #[proc_macro_attribute] pub fn storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a -/// getter function on `Pallet`. /// -/// Also see [`pallet::storage`](`macro@storage`) +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::getter`. #[proc_macro_attribute] pub fn getter(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the -/// storage prefix to use. This is helpful if you wish to rename the storage field but don't -/// want to perform a migration. /// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::storage_prefix = "foo"] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// or +/// --- /// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; -/// ``` +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_prefix`. #[proc_macro_attribute] pub fn storage_prefix(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When -/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on -/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This -/// can be useful for storage which can never go into PoV (Proof of Validity). +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::unbounded`. #[proc_macro_attribute] pub fn unbounded(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::whitelist_storage]` will declare the -/// storage as whitelisted from benchmarking. Doing so will exclude reads of -/// that value's storage key from counting towards weight calculations during -/// benchmarking. -/// -/// This attribute should only be attached to storages that are known to be -/// read/used in every block. This will result in a more accurate benchmarking weight. /// -/// ### Example -/// ```ignore -/// #[pallet::storage] -/// #[pallet::whitelist_storage] -/// pub(super) type Number = StorageValue<_, frame_system::pallet_prelude::BlockNumberFor::, ValueQuery>; -/// ``` +/// --- /// -/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as -/// `#[pallet::whitelist_storage]` and can only be placed inside a `pallet` module in order for -/// it to work properly. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::whitelist_storage`. 
#[proc_macro_attribute] pub fn whitelist_storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::type_value]` attribute lets you define a struct implementing the `Get` trait -/// to ease the use of storage types. This attribute is meant to be used alongside -/// [`#[pallet::storage]`](`macro@storage`) to define a storage's default value. This attribute -/// can be used multiple times. -/// -/// Item must be defined as: /// -/// ```ignore -/// #[pallet::type_value] -/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } -/// ``` -/// -/// I.e.: a function definition with generics none or `T: Config` and a returned type. -/// -/// E.g.: +/// --- /// -/// ```ignore -/// #[pallet::type_value] -/// fn MyDefault() -> T::Balance { 3.into() } -/// ``` +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::disable_try_decode_storage`. +#[proc_macro_attribute] +pub fn disable_try_decode_storage(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// -/// ## Macro expansion +/// --- /// -/// The macro renames the function to some internal name, generates a struct with the original -/// name of the function and its generic, and implements `Get<$ReturnType>` by calling the user -/// defined function. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::type_value`. #[proc_macro_attribute] pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1405,7 +1046,7 @@ pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::genesis_config`. #[proc_macro_attribute] pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { @@ -1415,115 +1056,48 @@ pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::genesis_build`. #[proc_macro_attribute] pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::inherent]` attribute allows the pallet to provide some -/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). -/// An inherent is some piece of data that is inserted by a block authoring node at block -/// creation time and can either be accepted or rejected by validators based on whether the -/// data falls within an acceptable range. -/// -/// The most common inherent is the `timestamp` that is inserted into every block. Since there -/// is no way to validate timestamps, validators simply check that the timestamp reported by -/// the block authoring node falls within an acceptable range. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// // ... regular trait implementation -/// } -/// ``` /// -/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type -/// `Pallet`, and some optional where clause. 
-/// -/// ## Macro expansion +/// --- /// -/// The macro currently makes no use of this information, but it might use this information in -/// the future to give information directly to `construct_runtime`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::inherent`. #[proc_macro_attribute] pub fn inherent(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned -/// transaction: -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type -/// `Pallet`, and some optional where clause. /// -/// NOTE: There is also the `sp_runtime::traits::SignedExtension` trait that can be used to add -/// some specific logic for transaction validation. -/// -/// ## Macro expansion +/// --- /// -/// The macro currently makes no use of this information, but it might use this information in -/// the future to give information directly to `construct_runtime`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::validate_unsigned`. #[proc_macro_attribute] pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. -/// -/// Item must be either a type alias, an enum, or a struct. It needs to be public. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T)>); -/// ``` /// -/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care -/// as it might require some migration. +/// --- /// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::origin`. #[proc_macro_attribute] pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::composite_enum]` attribute allows you to define an enum that gets composed as an -/// aggregate enum by `construct_runtime`. This is similar in principle with `#[pallet::event]` and -/// `#[pallet::error]`. -/// -/// The attribute currently only supports enum definitions, and identifiers that are named -/// `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. Arbitrary identifiers for the enum are -/// not supported. The aggregate enum generated by `construct_runtime` will have the name of -/// `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeLockId` and `RuntimeSlashReason` -/// respectively. /// -/// NOTE: The aggregate enum generated by `construct_runtime` generates a conversion function from -/// the pallet enum to the aggregate enum, and automatically derives the following traits: -/// -/// ```ignore -/// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, -/// RuntimeDebug -/// ``` +/// --- /// -/// For ease of usage, when no `#[derive]` attributes are found for the enum under -/// `#[pallet::composite_enum]`, the aforementioned traits are automatically derived for it. 
The -/// inverse is also true: if there are any `#[derive]` attributes found for the enum, then no traits -/// will automatically be derived for it (this implies that you need to provide the -/// `frame_support::traits::VariantCount` implementation). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::composite_enum`. #[proc_macro_attribute] pub fn composite_enum(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1579,25 +1153,11 @@ pub fn task_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Can be attached to a module. Doing so will declare that module as importable into a pallet -/// via [`#[import_section]`](`macro@import_section`). -/// -/// Note that sections are imported by their module name/ident, and should be referred to by -/// their _full path_ from the perspective of the target pallet. Do not attempt to make use -/// of `use` statements to bring pallet sections into scope, as this will not work (unless -/// you do so as part of a wildcard import, in which case it will work). -/// -/// ## Naming Logistics /// -/// Also note that because of how `#[pallet_section]` works, pallet section names must be -/// globally unique _within the crate in which they are defined_. For more information on -/// why this must be the case, see macro_magic's -/// [`#[export_tokens]`](https://docs.rs/macro_magic/latest/macro_magic/attr.export_tokens.html) macro. +/// --- /// -/// Optionally, you may provide an argument to `#[pallet_section]` such as -/// `#[pallet_section(some_ident)]`, in the event that there is another pallet section in -/// same crate with the same ident/name. The ident you specify can then be used instead of -/// the module's ident name when you go to import it via `#[import_section]`. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::pallet_section`. #[proc_macro_attribute] pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { let tokens_clone = tokens.clone(); @@ -1611,39 +1171,11 @@ pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { } } -/// An attribute macro that can be attached to a module declaration. Doing so will -/// Imports the contents of the specified external pallet section that was defined -/// previously using [`#[pallet_section]`](`macro@pallet_section`). /// -/// ## Example -/// ```ignore -/// #[import_section(some_section)] -/// #[pallet] -/// pub mod pallet { -/// // ... -/// } -/// ``` -/// where `some_section` was defined elsewhere via: -/// ```ignore -/// #[pallet_section] -/// pub mod some_section { -/// // ... -/// } -/// ``` -/// -/// This will result in the contents of `some_section` being _verbatim_ imported into -/// the pallet above. Note that since the tokens for `some_section` are essentially -/// copy-pasted into the target pallet, you cannot refer to imports that don't also -/// exist in the target pallet, but this is easily resolved by including all relevant -/// `use` statements within your pallet section, so they are imported as well, or by -/// otherwise ensuring that you have the same imports on the target pallet. -/// -/// It is perfectly permissible to import multiple pallet sections into the same pallet, -/// which can be done by having multiple `#[import_section(something)]` attributes -/// attached to the pallet. 
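Putting the two attributes together, a minimal sketch of defining and then importing a pallet section, adapted from the example removed above (`some_section` is an illustrative name):

```ignore
// Define a reusable section; it is exported under its module ident.
#[pallet_section]
pub mod some_section {
    // ...
}

// Import its contents verbatim into a pallet.
#[import_section(some_section)]
#[pallet]
pub mod pallet {
    // ...
}
```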
+/// --- /// -/// Note that sections are imported by their module name/ident, and should be referred to by -/// their _full path_ from the perspective of the target pallet. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::import_section`. #[import_tokens_attr { format!( "{}::macro_magic", @@ -1689,6 +1221,73 @@ pub fn import_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { .into() } +/// Construct a runtime with the given name and the given pallets. +/// +/// # Example +/// +/// ```ignore +/// #[frame_support::runtime] +/// mod runtime { +/// // The main runtime +/// #[runtime::runtime] +/// // Runtime Types to be generated +/// #[runtime::derive( +/// RuntimeCall, +/// RuntimeEvent, +/// RuntimeError, +/// RuntimeOrigin, +/// RuntimeFreezeReason, +/// RuntimeHoldReason, +/// RuntimeSlashReason, +/// RuntimeLockId, +/// RuntimeTask, +/// )] +/// pub struct Runtime; +/// +/// #[runtime::pallet_index(0)] +/// pub type System = frame_system; +/// +/// #[runtime::pallet_index(1)] +/// pub type Test = path::to::test; +/// +/// // Pallet with instance. +/// #[runtime::pallet_index(2)] +/// pub type Test2_Instance1 = test2; +/// +/// // Pallet with calls disabled. +/// #[runtime::pallet_index(3)] +/// #[runtime::disable_call] +/// pub type Test3 = test3; +/// +/// // Pallet with unsigned extrinsics disabled. +/// #[runtime::pallet_index(4)] +/// #[runtime::disable_unsigned] +/// pub type Test4 = test4; +/// } +/// ``` +/// +/// # Legacy Ordering +/// +/// An optional attribute can be defined as `#[frame_support::runtime(legacy_ordering)]` to +/// ensure that the order of hooks is the same as the order of pallets (and not based on the +/// pallet_index). This is to support legacy runtimes and should be avoided for new ones. +/// +/// # Note +/// +/// The population of the genesis storage depends on the order of pallets. So, if one of your +/// pallets depends on another pallet, the pallet that is depended upon needs to come before +/// the pallet depending on it. +/// +/// # Type definitions +/// +/// * The macro generates a type alias for each pallet to its `Pallet`. E.g. `type System = +/// frame_system::Pallet` +#[cfg(feature = "experimental")] +#[proc_macro_attribute] +pub fn runtime(attr: TokenStream, item: TokenStream) -> TokenStream { + runtime::runtime(attr, item) } + /// Mark a module that contains dynamic parameters. /// /// See the `pallet_parameters` for a full example. diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index f43faba1ee0c8c14cb808b2f64f58446ebe7ffae..f395872c8a80a6b9a2ddd391ea106284140d53ba 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -18,7 +18,7 @@ use crate::{ pallet::{ expand::warnings::{weight_constant_warning, weight_witness_warning}, - parse::call::{CallVariantDef, CallWeightDef}, + parse::call::CallWeightDef, Def, }, COUNTER, @@ -112,22 +112,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } debug_assert_eq!(fn_weight.len(), methods.len()); - let map_fn_docs = if !def.dev_mode { - // Emit the [`Pallet::method`] documentation only for non-dev modes. 
- |method: &CallVariantDef| { - let reference = format!("See [`Pallet::{}`].", method.name); - quote!(#reference) - } - } else { - // For the dev-mode do not provide a documenation link as it will break the - // `cargo doc` if the pallet is private inside a test. - |method: &CallVariantDef| { - let reference = format!("See `Pallet::{}`.", method.name); - quote!(#reference) - } - }; - - let fn_doc = methods.iter().map(map_fn_docs).collect::>(); + let fn_doc = methods.iter().map(|method| &method.docs).collect::>(); let args_name = methods .iter() @@ -309,7 +294,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { ), #( #cfg_attrs - #[doc = #fn_doc] + #( #[doc = #fn_doc] )* #[codec(index = #call_index)] #fn_name { #( diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 5044d4285bb64aceeccd14959743a494f3c94f85..3623b595268d081ee7b5750a5431f208383f8517 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -42,7 +42,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { >::name::().unwrap_or("") }; - let initialize_on_chain_storage_version = if let Some(current_version) = + let initialize_on_chain_storage_version = if let Some(in_code_version) = &def.pallet_struct.storage_version { quote::quote! { @@ -50,9 +50,9 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { target: #frame_support::LOG_TARGET, "🐥 New pallet {:?} detected in the runtime. Initializing the on-chain storage version to match the storage version defined in the pallet: {:?}", #pallet_name, - #current_version + #in_code_version ); - #current_version.put::(); + #in_code_version.put::(); } } else { quote::quote! { @@ -73,10 +73,10 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::__private::log::info!( target: #frame_support::LOG_TARGET, "⚠️ {} declares internal migrations (which *might* execute). \ - On-chain `{:?}` vs current storage version `{:?}`", + On-chain `{:?}` vs in-code storage version `{:?}`", #pallet_name, ::on_chain_storage_version(), - ::current_storage_version(), + ::in_code_storage_version(), ); } } else { @@ -102,23 +102,23 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { }; // If a storage version is set, we should ensure that the storage version on chain matches the - // current storage version. This assumes that `Executive` is running custom migrations before + // in-code storage version. This assumes that `Executive` is running custom migrations before // the pallets are called. let post_storage_version_check = if def.pallet_struct.storage_version.is_some() { quote::quote! { let on_chain_version = ::on_chain_storage_version(); - let current_version = ::current_storage_version(); + let in_code_version = ::in_code_storage_version(); - if on_chain_version != current_version { + if on_chain_version != in_code_version { #frame_support::__private::log::error!( target: #frame_support::LOG_TARGET, - "{}: On chain storage version {:?} doesn't match current storage version {:?}.", + "{}: On chain storage version {:?} doesn't match in-code storage version {:?}.", #pallet_name, on_chain_version, - current_version, + in_code_version, ); - return Err("On chain and current storage version do not match. Missing runtime upgrade?".into()); + return Err("On chain and in-code storage version do not match. 
Missing runtime upgrade?".into()); } } } else { @@ -175,6 +175,22 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> + #frame_support::traits::OnPoll<#frame_system::pallet_prelude::BlockNumberFor::> + for #pallet_ident<#type_use_gen> #where_clause + { + fn on_poll( + n: #frame_system::pallet_prelude::BlockNumberFor::, + weight: &mut #frame_support::weights::WeightMeter + ) { + < + Self as #frame_support::traits::Hooks< + #frame_system::pallet_prelude::BlockNumberFor:: + > + >::on_poll(n, weight); + } + } + impl<#type_impl_gen> #frame_support::traits::OnInitialize<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause diff --git a/substrate/frame/support/procedural/src/pallet/expand/mod.rs b/substrate/frame/support/procedural/src/pallet/expand/mod.rs index 3da7d9293c7cc1a1676e5cb0e4afe9ab08b25948..067839c2846353cb995d4c160ac2f38313c19d01 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/mod.rs @@ -31,7 +31,6 @@ mod instances; mod origin; mod pallet_struct; mod storage; -mod store_trait; mod tasks; mod tt_default_parts; mod type_value; @@ -68,7 +67,6 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { let storages = storage::expand_storages(&mut def); let inherents = inherent::expand_inherents(&mut def); let instances = instances::expand_instances(&mut def); - let store_trait = store_trait::expand_store_trait(&mut def); let hooks = hooks::expand_hooks(&mut def); let genesis_build = genesis_build::expand_genesis_build(&mut def); let genesis_config = genesis_config::expand_genesis_config(&mut def); @@ -110,7 +108,6 @@ storage item. Otherwise, all storage items are listed among [*Type Definitions*] #storages #inherents #instances - #store_trait #hooks #genesis_build #genesis_config diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs index c2102f0284dbeeadb08fc9122e5fe824032dfb82..7cdf6bde9de87d0bd4eed70de93173bc9bf4e3ff 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -160,7 +160,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { } ); - let (storage_version, current_storage_version_ty) = + let (storage_version, in_code_storage_version_ty) = if let Some(v) = def.pallet_struct.storage_version.as_ref() { (quote::quote! { #v }, quote::quote! 
{ #frame_support::traits::StorageVersion }) } else { @@ -203,9 +203,9 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { for #pallet_ident<#type_use_gen> #config_where_clause { - type CurrentStorageVersion = #current_storage_version_ty; + type InCodeStorageVersion = #in_code_storage_version_ty; - fn current_storage_version() -> Self::CurrentStorageVersion { + fn in_code_storage_version() -> Self::InCodeStorageVersion { #storage_version } diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index 96c2c8e3120b8f8fb76e8ee10b067ce3f13f62e9..937b068cfabd14e04b05177be2f0242eccd3abec 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -834,7 +834,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { .storages .iter() .filter_map(|storage| { - if storage.cfg_attrs.is_empty() { + // A little hacky; don't generate for cfg gated storages to not get compile errors + // when building "frame-feature-testing" gated storages in the "frame-support-test" + // crate. + if storage.try_decode && storage.cfg_attrs.is_empty() { let ident = &storage.ident; let gen = &def.type_use_generics(storage.attr_span); Some(quote::quote_spanned!(storage.attr_span => #ident<#gen> )) diff --git a/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs b/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs deleted file mode 100644 index 6635adc988157361a1478a5ec06589f7988b9676..0000000000000000000000000000000000000000 --- a/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs +++ /dev/null @@ -1,67 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::pallet::Def; -use syn::spanned::Spanned; - -/// If attribute `#[pallet::generate_store(..)]` is defined then: -/// * generate Store trait with all storages, -/// * implement Store trait for Pallet. 
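With the `Store` trait expansion deleted below, callers address storage types directly instead of going through the generated trait. A hedged before/after sketch (`Foo` is an illustrative storage item):

```ignore
// Before: via the deprecated generated trait.
#[pallet::generate_store(pub(super) trait Store)]
// ...
let v = <Pallet<T> as Store>::Foo::get();

// After: the storage type is used directly.
let v = Foo::<T>::get();
```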
-pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { - let (trait_vis, trait_store, attribute_span) = - if let Some(store) = &def.pallet_struct.store { store } else { return Default::default() }; - - let type_impl_gen = &def.type_impl_generics(trait_store.span()); - let type_use_gen = &def.type_use_generics(trait_store.span()); - let pallet_ident = &def.pallet_struct.pallet; - - let mut where_clauses = vec![&def.config.where_clause]; - where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); - let completed_where_clause = super::merge_where_clauses(&where_clauses); - - let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::>(); - let storage_cfg_attrs = - &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::>(); - let warnig_struct_name = syn::Ident::new("Store", *attribute_span); - let warning: syn::ItemStruct = syn::parse_quote!( - #[deprecated(note = r" - Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023. - Check https://github.com/paritytech/substrate/pull/13535 for more details.")] - struct #warnig_struct_name; - ); - - quote::quote_spanned!(trait_store.span() => - const _:() = { - #warning - const _: Option<#warnig_struct_name> = None; - }; - #trait_vis trait #trait_store { - #( - #(#storage_cfg_attrs)* - type #storage_names; - )* - } - impl<#type_impl_gen> #trait_store for #pallet_ident<#type_use_gen> - #completed_where_clause - { - #( - #(#storage_cfg_attrs)* - type #storage_names = #storage_names<#type_use_gen>; - )* - } - ) -} diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index 7cc1415dfddf1514cf398555c650f4892097aefd..99364aaa96cd7808def45563392be207604151d7 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -28,6 +28,8 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { syn::Ident::new(&format!("__tt_default_parts_{}", count), def.item.span()); let extra_parts_unique_id = syn::Ident::new(&format!("__tt_extra_parts_{}", count), def.item.span()); + let default_parts_unique_id_v2 = + syn::Ident::new(&format!("__tt_default_parts_v2_{}", count), def.item.span()); let call_part = def.call.as_ref().map(|_| quote::quote!(Call,)); @@ -81,6 +83,58 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { .any(|c| matches!(c.composite_keyword, CompositeKeyword::SlashReason(_))) .then_some(quote::quote!(SlashReason,)); + let call_part_v2 = def.call.as_ref().map(|_| quote::quote!(+ Call)); + + let task_part_v2 = def.task_enum.as_ref().map(|_| quote::quote!(+ Task)); + + let storage_part_v2 = (!def.storages.is_empty()).then(|| quote::quote!(+ Storage)); + + let event_part_v2 = def.event.as_ref().map(|event| { + let gen = event.gen_kind.is_generic().then(|| quote::quote!()); + quote::quote!(+ Event #gen) + }); + + let error_part_v2 = def.error.as_ref().map(|_| quote::quote!(+ Error)); + + let origin_part_v2 = def.origin.as_ref().map(|origin| { + let gen = origin.is_generic.then(|| quote::quote!()); + quote::quote!(+ Origin #gen) + }); + + let config_part_v2 = def.genesis_config.as_ref().map(|genesis_config| { + let gen = genesis_config.gen_kind.is_generic().then(|| quote::quote!()); + quote::quote!(+ Config #gen) + }); + + let inherent_part_v2 = def.inherent.as_ref().map(|_| quote::quote!(+ 
Inherent)); + + let validate_unsigned_part_v2 = + def.validate_unsigned.as_ref().map(|_| quote::quote!(+ ValidateUnsigned)); + + let freeze_reason_part_v2 = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::FreezeReason(_))) + .then_some(quote::quote!(+ FreezeReason)); + + let hold_reason_part_v2 = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::HoldReason(_))) + .then_some(quote::quote!(+ HoldReason)); + + let lock_id_part_v2 = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::LockId(_))) + .then_some(quote::quote!(+ LockId)); + + let slash_reason_part_v2 = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::SlashReason(_))) + .then_some(quote::quote!(+ SlashReason)); + quote::quote!( // This macro follows the conventions as laid out by the `tt-call` crate. It does not // accept any arguments and simply returns the pallet parts, separated by commas, then @@ -138,5 +192,25 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { } pub use #extra_parts_unique_id as tt_extra_parts; + + #[macro_export] + #[doc(hidden)] + macro_rules! #default_parts_unique_id_v2 { + { + $caller:tt + frame_support = [{ $($frame_support:ident)::* }] + } => { + $($frame_support)*::__private::tt_return! { + $caller + tokens = [{ + + Pallet #call_part_v2 #storage_part_v2 #event_part_v2 #error_part_v2 #origin_part_v2 #config_part_v2 + #inherent_part_v2 #validate_unsigned_part_v2 #freeze_reason_part_v2 #task_part_v2 + #hold_reason_part_v2 #lock_id_part_v2 #slash_reason_part_v2 + }] + } + }; + } + + pub use #default_parts_unique_id_v2 as tt_default_parts_v2; ) } diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs index 538226a8745fe4d0792e36b9aaad9cbed9d4f88e..3187c9139c8f46c7664e96edb6870f68979146ae 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs @@ -148,6 +148,12 @@ impl MutItemAttrs for syn::ImplItemFn { } } +impl MutItemAttrs for syn::ItemType { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + Some(&mut self.attrs) + } +} + /// Parse for `()` struct Unit; impl syn::parse::Parse for Unit { diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index e1efdbcc2027975d89a00b1037bd5d0af68998a3..b55f130a93a7ee7eef377678c4fc1ea664644601 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -558,8 +558,6 @@ mod keyword { syn::custom_keyword!(validate_unsigned); syn::custom_keyword!(type_value); syn::custom_keyword!(pallet); - syn::custom_keyword!(generate_store); - syn::custom_keyword!(Store); syn::custom_keyword!(extra_constants); syn::custom_keyword!(composite_enum); } diff --git a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs index f4af86aa3e9936a3e77036f062ba8d4bf1a6c601..b645760998fe1c27e48c724dcde7cfcc658bb594 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -23,10 +23,8 @@ use syn::spanned::Spanned; mod keyword { syn::custom_keyword!(pallet); syn::custom_keyword!(Pallet); - 
syn::custom_keyword!(generate_store); syn::custom_keyword!(without_storage_info); syn::custom_keyword!(storage_version); - syn::custom_keyword!(Store); } /// Definition of the pallet pallet. @@ -37,23 +35,19 @@ pub struct PalletStructDef { pub instances: Vec, /// The keyword Pallet used (contains span). pub pallet: keyword::Pallet, - /// Whether the trait `Store` must be generated. - pub store: Option<(syn::Visibility, keyword::Store, proc_macro2::Span)>, /// The span of the pallet::pallet attribute. pub attr_span: proc_macro2::Span, /// Whether to specify the storages max encoded len when implementing `StorageInfoTrait`. /// Contains the span of the attribute. pub without_storage_info: Option, - /// The current storage version of the pallet. + /// The in-code storage version of the pallet. pub storage_version: Option, } /// Parse for one variant of: -/// * `#[pallet::generate_store($vis trait Store)]` /// * `#[pallet::without_storage_info]` /// * `#[pallet::storage_version(STORAGE_VERSION)]` pub enum PalletStructAttr { - GenerateStore { span: proc_macro2::Span, vis: syn::Visibility, keyword: keyword::Store }, WithoutStorageInfoTrait(proc_macro2::Span), StorageVersion { storage_version: syn::Path, span: proc_macro2::Span }, } @@ -61,9 +55,7 @@ pub enum PalletStructAttr { impl PalletStructAttr { fn span(&self) -> proc_macro2::Span { match self { - Self::GenerateStore { span, .. } | - Self::WithoutStorageInfoTrait(span) | - Self::StorageVersion { span, .. } => *span, + Self::WithoutStorageInfoTrait(span) | Self::StorageVersion { span, .. } => *span, } } } @@ -77,16 +69,7 @@ impl syn::parse::Parse for PalletStructAttr { content.parse::()?; let lookahead = content.lookahead1(); - if lookahead.peek(keyword::generate_store) { - content.parse::()?; - let generate_content; - syn::parenthesized!(generate_content in content); - let vis = generate_content.parse::()?; - generate_content.parse::()?; - let keyword = generate_content.parse::()?; - let span = content.span(); - Ok(Self::GenerateStore { vis, keyword, span }) - } else if lookahead.peek(keyword::without_storage_info) { + if lookahead.peek(keyword::without_storage_info) { let span = content.parse::()?.span(); Ok(Self::WithoutStorageInfoTrait(span)) } else if lookahead.peek(keyword::storage_version) { @@ -116,16 +99,12 @@ impl PalletStructDef { return Err(syn::Error::new(item.span(), msg)) }; - let mut store = None; let mut without_storage_info = None; let mut storage_version_found = None; let struct_attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; for attr in struct_attrs { match attr { - PalletStructAttr::GenerateStore { vis, keyword, span } if store.is_none() => { - store = Some((vis, keyword, span)); - }, PalletStructAttr::WithoutStorageInfoTrait(span) if without_storage_info.is_none() => { @@ -162,7 +141,6 @@ impl PalletStructDef { index, instances, pallet, - store, attr_span, without_storage_info, storage_version: storage_version_found, diff --git a/substrate/frame/support/procedural/src/pallet/parse/storage.rs b/substrate/frame/support/procedural/src/pallet/parse/storage.rs index d1c7ba2e5e3c6788827577dfa843205d0be69174..9d96a18b56943db4715fabd30e8bc0f1c0aa7d28 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/storage.rs @@ -29,6 +29,7 @@ mod keyword { syn::custom_keyword!(storage_prefix); syn::custom_keyword!(unbounded); syn::custom_keyword!(whitelist_storage); + syn::custom_keyword!(disable_try_decode_storage); 
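A brief usage sketch for the new attribute whose keyword is registered here; it excludes an item from the try-runtime state decoding checks (`WellKnownKey` is an illustrative name and the value type is arbitrary):

```ignore
#[pallet::storage]
#[pallet::disable_try_decode_storage]
pub(super) type WellKnownKey<T> = StorageValue<_, Vec<u8>, ValueQuery>;
```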
syn::custom_keyword!(OptionQuery); syn::custom_keyword!(ResultQuery); syn::custom_keyword!(ValueQuery); @@ -39,11 +40,13 @@ mod keyword { /// * `#[pallet::storage_prefix = "CustomName"]` /// * `#[pallet::unbounded]` /// * `#[pallet::whitelist_storage] +/// * `#[pallet::disable_try_decode_storage]` pub enum PalletStorageAttr { Getter(syn::Ident, proc_macro2::Span), StorageName(syn::LitStr, proc_macro2::Span), Unbounded(proc_macro2::Span), WhitelistStorage(proc_macro2::Span), + DisableTryDecodeStorage(proc_macro2::Span), } impl PalletStorageAttr { @@ -53,6 +56,7 @@ impl PalletStorageAttr { Self::StorageName(_, span) | Self::Unbounded(span) | Self::WhitelistStorage(span) => *span, + Self::DisableTryDecodeStorage(span) => *span, } } } @@ -93,6 +97,9 @@ impl syn::parse::Parse for PalletStorageAttr { } else if lookahead.peek(keyword::whitelist_storage) { content.parse::()?; Ok(Self::WhitelistStorage(attr_span)) + } else if lookahead.peek(keyword::disable_try_decode_storage) { + content.parse::()?; + Ok(Self::DisableTryDecodeStorage(attr_span)) } else { Err(lookahead.error()) } @@ -104,6 +111,7 @@ struct PalletStorageAttrInfo { rename_as: Option, unbounded: bool, whitelisted: bool, + try_decode: bool, } impl PalletStorageAttrInfo { @@ -112,6 +120,7 @@ impl PalletStorageAttrInfo { let mut rename_as = None; let mut unbounded = false; let mut whitelisted = false; + let mut disable_try_decode_storage = false; for attr in attrs { match attr { PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), @@ -119,6 +128,8 @@ impl PalletStorageAttrInfo { rename_as = Some(name), PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, PalletStorageAttr::WhitelistStorage(..) if !whitelisted => whitelisted = true, + PalletStorageAttr::DisableTryDecodeStorage(..) if !disable_try_decode_storage => + disable_try_decode_storage = true, attr => return Err(syn::Error::new( attr.attr_span(), @@ -127,7 +138,13 @@ impl PalletStorageAttrInfo { } } - Ok(PalletStorageAttrInfo { getter, rename_as, unbounded, whitelisted }) + Ok(PalletStorageAttrInfo { + getter, + rename_as, + unbounded, + whitelisted, + try_decode: !disable_try_decode_storage, + }) } } @@ -186,6 +203,8 @@ pub struct StorageDef { pub unbounded: bool, /// Whether or not reads to this storage key will be ignored by benchmarking pub whitelisted: bool, + /// Whether or not to try to decode the storage key when running try-runtime checks. + pub try_decode: bool, /// Whether or not a default hasher is allowed to replace `_` pub use_default_hasher: bool, } @@ -775,7 +794,7 @@ impl StorageDef { }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted } = + let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted, try_decode } = PalletStorageAttrInfo::from_attrs(attrs)?; // set all storages to be unbounded if dev_mode is enabled @@ -921,6 +940,7 @@ impl StorageDef { named_generics, unbounded, whitelisted, + try_decode, use_default_hasher, }) } diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..93c88fce94b73665efa7912b36056b661b12be1e --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -0,0 +1,320 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::parse::runtime_types::RuntimeType; +use crate::{ + construct_runtime::{ + check_pallet_number, decl_all_pallets, decl_integrity_test, decl_pallet_runtime_setup, + decl_static_assertions, expand, + }, + runtime::{ + parse::{ + AllPalletsDeclaration, ExplicitAllPalletsDeclaration, ImplicitAllPalletsDeclaration, + }, + Def, + }, +}; +use cfg_expr::Predicate; +use frame_support_procedural_tools::{ + generate_access_from_frame_or_crate, generate_crate_access, generate_hidden_includes, +}; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; +use std::collections::HashSet; +use syn::{Ident, Result}; + +/// The fixed name of the system pallet. +const SYSTEM_PALLET_NAME: &str = "System"; + +pub fn expand(def: Def, legacy_ordering: bool) -> TokenStream2 { + let input = def.input; + + let (check_pallet_number_res, res) = match def.pallets { + AllPalletsDeclaration::Implicit(ref decl) => ( + check_pallet_number(input.clone(), decl.pallet_count), + construct_runtime_implicit_to_explicit(input.into(), decl.clone(), legacy_ordering), + ), + AllPalletsDeclaration::Explicit(ref decl) => ( + check_pallet_number(input, decl.pallets.len()), + construct_runtime_final_expansion( + def.runtime_struct.ident.clone(), + decl.clone(), + def.runtime_types.clone(), + legacy_ordering, + ), + ), + }; + + let res = res.unwrap_or_else(|e| e.to_compile_error()); + + // We want to provide better error messages to the user and thus, handle the error here + // separately. If there is an error, we print the error and still generate all of the code to + // get in overall less errors for the user. + let res = if let Err(error) = check_pallet_number_res { + let error = error.to_compile_error(); + + quote! { + #error + + #res + } + } else { + res + }; + + let res = expander::Expander::new("construct_runtime") + .dry(std::env::var("FRAME_EXPAND").is_err()) + .verbose(true) + .write_to_out_dir(res) + .expect("Does not fail because of IO in OUT_DIR; qed"); + + res.into() +} + +fn construct_runtime_implicit_to_explicit( + input: TokenStream2, + definition: ImplicitAllPalletsDeclaration, + legacy_ordering: bool, +) -> Result { + let frame_support = generate_access_from_frame_or_crate("frame-support")?; + let attr = if legacy_ordering { quote!((legacy_ordering)) } else { quote!() }; + let mut expansion = quote::quote!( + #[frame_support::runtime #attr] + #input + ); + for pallet in definition.pallet_decls.iter() { + let pallet_path = &pallet.path; + let pallet_name = &pallet.name; + let pallet_instance = pallet.instance.as_ref().map(|instance| quote::quote!(<#instance>)); + expansion = quote::quote!( + #frame_support::__private::tt_call! { + macro = [{ #pallet_path::tt_default_parts_v2 }] + frame_support = [{ #frame_support }] + ~~> #frame_support::match_and_insert! 
{ + target = [{ #expansion }] + pattern = [{ #pallet_name = #pallet_path #pallet_instance }] + } + } + ); + } + + Ok(expansion) +} + +fn construct_runtime_final_expansion( + name: Ident, + definition: ExplicitAllPalletsDeclaration, + runtime_types: Vec, + legacy_ordering: bool, +) -> Result { + let ExplicitAllPalletsDeclaration { mut pallets, name: pallets_name } = definition; + + if !legacy_ordering { + // Ensure that order of hooks is based on the pallet index + pallets.sort_by_key(|p| p.index); + } + + let system_pallet = + pallets.iter().find(|decl| decl.name == SYSTEM_PALLET_NAME).ok_or_else(|| { + syn::Error::new( + pallets_name.span(), + "`System` pallet declaration is missing. \ + Please add this line: `pub type System = frame_system;`", + ) + })?; + if !system_pallet.cfg_pattern.is_empty() { + return Err(syn::Error::new( + system_pallet.name.span(), + "`System` pallet declaration is feature gated, please remove any `#[cfg]` attributes", + )) + } + + let features = pallets + .iter() + .filter_map(|decl| { + (!decl.cfg_pattern.is_empty()).then(|| { + decl.cfg_pattern.iter().flat_map(|attr| { + attr.predicates().filter_map(|pred| match pred { + Predicate::Feature(feat) => Some(feat), + Predicate::Test => Some("test"), + _ => None, + }) + }) + }) + }) + .flatten() + .collect::>(); + + let hidden_crate_name = "construct_runtime"; + let scrate = generate_crate_access(hidden_crate_name, "frame-support"); + let scrate_decl = generate_hidden_includes(hidden_crate_name, "frame-support"); + + let frame_system = generate_access_from_frame_or_crate("frame-system")?; + let block = quote!(<#name as #frame_system::Config>::Block); + let unchecked_extrinsic = quote!(<#block as #scrate::sp_runtime::traits::Block>::Extrinsic); + + let mut dispatch = None; + let mut outer_event = None; + let mut outer_error = None; + let mut outer_origin = None; + let mut freeze_reason = None; + let mut hold_reason = None; + let mut slash_reason = None; + let mut lock_id = None; + let mut task = None; + + for runtime_type in runtime_types.iter() { + match runtime_type { + RuntimeType::RuntimeCall(_) => { + dispatch = + Some(expand::expand_outer_dispatch(&name, system_pallet, &pallets, &scrate)); + }, + RuntimeType::RuntimeEvent(_) => { + outer_event = Some(expand::expand_outer_enum( + &name, + &pallets, + &scrate, + expand::OuterEnumType::Event, + )?); + }, + RuntimeType::RuntimeError(_) => { + outer_error = Some(expand::expand_outer_enum( + &name, + &pallets, + &scrate, + expand::OuterEnumType::Error, + )?); + }, + RuntimeType::RuntimeOrigin(_) => { + outer_origin = + Some(expand::expand_outer_origin(&name, system_pallet, &pallets, &scrate)?); + }, + RuntimeType::RuntimeFreezeReason(_) => { + freeze_reason = Some(expand::expand_outer_freeze_reason(&pallets, &scrate)); + }, + RuntimeType::RuntimeHoldReason(_) => { + hold_reason = Some(expand::expand_outer_hold_reason(&pallets, &scrate)); + }, + RuntimeType::RuntimeSlashReason(_) => { + slash_reason = Some(expand::expand_outer_slash_reason(&pallets, &scrate)); + }, + RuntimeType::RuntimeLockId(_) => { + lock_id = Some(expand::expand_outer_lock_id(&pallets, &scrate)); + }, + RuntimeType::RuntimeTask(_) => { + task = Some(expand::expand_outer_task(&name, &pallets, &scrate)); + }, + } + } + + let all_pallets = decl_all_pallets(&name, pallets.iter(), &features); + let pallet_to_index = decl_pallet_runtime_setup(&name, &pallets, &scrate); + + let metadata = expand::expand_runtime_metadata( + &name, + &pallets, + &scrate, + &unchecked_extrinsic, + &system_pallet.path, + ); 
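Each entry requested via `#[runtime::derive(..)]` flips exactly one of the options above from `None` to a generated expansion, and `quote!` later drops the `None`s, so a runtime deriving a subset produces a correspondingly smaller output. A hedged illustration:

```ignore
// Only `RuntimeCall` and `RuntimeEvent` are generated from this list; the
// other outer types (error, origin, reason/id enums, task) remain `None`
// and are simply omitted from the final token stream.
#[runtime::derive(RuntimeCall, RuntimeEvent)]
pub struct Runtime;
```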
+ let outer_config = expand::expand_outer_config(&name, &pallets, &scrate); + let inherent = + expand::expand_outer_inherent(&name, &block, &unchecked_extrinsic, &pallets, &scrate); + let validate_unsigned = expand::expand_outer_validate_unsigned(&name, &pallets, &scrate); + let integrity_test = decl_integrity_test(&scrate); + let static_assertions = decl_static_assertions(&name, &pallets, &scrate); + + let res = quote!( + #scrate_decl + + // Prevent UncheckedExtrinsic to print unused warning. + const _: () = { + #[allow(unused)] + type __hidden_use_of_unchecked_extrinsic = #unchecked_extrinsic; + }; + + #[derive( + Clone, Copy, PartialEq, Eq, #scrate::sp_runtime::RuntimeDebug, + #scrate::__private::scale_info::TypeInfo + )] + pub struct #name; + impl #scrate::sp_runtime::traits::GetRuntimeBlockType for #name { + type RuntimeBlock = #block; + } + + // Each runtime must expose the `runtime_metadata()` to fetch the runtime API metadata. + // The function is implemented by calling `impl_runtime_apis!`. + // + // However, the `runtime` may be used without calling `impl_runtime_apis!`. + // Rely on the `Deref` trait to differentiate between a runtime that implements + // APIs (by macro impl_runtime_apis!) and a runtime that is simply created (by macro runtime). + // + // Both `InternalConstructRuntime` and `InternalImplRuntimeApis` expose a `runtime_metadata()` function. + // `InternalConstructRuntime` is implemented by the `runtime` for Runtime references (`& Runtime`), + // while `InternalImplRuntimeApis` is implemented by the `impl_runtime_apis!` for Runtime (`Runtime`). + // + // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` + // when both macros are called; and will resolve an empty `runtime_metadata` when only the `runtime` + // is used. + + #[doc(hidden)] + trait InternalConstructRuntime { + #[inline(always)] + fn runtime_metadata(&self) -> #scrate::__private::sp_std::vec::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { + Default::default() + } + } + #[doc(hidden)] + impl InternalConstructRuntime for &#name {} + + #outer_event + + #outer_error + + #outer_origin + + #all_pallets + + #pallet_to_index + + #dispatch + + #task + + #metadata + + #outer_config + + #inherent + + #validate_unsigned + + #freeze_reason + + #hold_reason + + #lock_id + + #slash_reason + + #integrity_test + + #static_assertions + ); + + Ok(res) +} diff --git a/substrate/frame/support/procedural/src/runtime/mod.rs b/substrate/frame/support/procedural/src/runtime/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..aaae579eb086638f1bfacf02105a8fcd0e3d01b5 --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/mod.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of `runtime`. +//! +//! 
`runtime` implementation is recursive and can generate code which will call itself +//! in order to get all the pallet parts for each pallet. +//! +//! Pallets can define their parts: +//! - Implicitly: `pub type System = frame_system;` +//! - Explicitly: `pub type System = frame_system + Pallet + Call;` +//! +//! The `runtime` transitions from the implicit definition to the explicit one. +//! From the explicit state, Substrate expands the pallets with additional information +//! that is to be included in the runtime metadata. +//! +//! Pallets must provide the `tt_default_parts_v2` macro for these transitions. +//! These are automatically implemented by the `#[pallet::pallet]` macro. +//! +//! This macro also generates the following enums for ease of decoding if the respective type +//! is defined inside `#[runtime::derive]`: +//! - `enum RuntimeCall`: This type contains the information needed to decode extrinsics. +//! - `enum RuntimeEvent`: This type contains the information needed to decode events. +//! - `enum RuntimeError`: While this cannot be used directly to decode `sp_runtime::DispatchError` +//! from the chain, it contains the information needed to decode the +//! `sp_runtime::DispatchError::Module`. +//! +//! # State Transitions +//! +//! ```ignore +//! +----------+ +//! | Implicit | +//! +----------+ +//! | +//! v +//! +----------+ +//! | Explicit | +//! +----------+ +//! ``` +//! +//! The `runtime` macro transforms the implicit declaration of each pallet +//! `System: frame_system` to an explicit one `System: frame_system + Pallet + Call` using the +//! `tt_default_parts_v2` macro. +//! +//! The `tt_default_parts_v2` macro exposes a plus-separated list of pallet parts. For example, the +//! `Event` part is exposed only if the pallet implements an event via the `#[pallet::event]` macro. +//! The tokens generated by this macro are `+ Pallet + Call` for our example. +//! +//! The `match_and_insert` macro takes in 3 arguments: +//! - target: This is the `TokenStream` that contains the `runtime` macro. +//! - pattern: The pattern to match against in the target stream. +//! - tokens: The tokens to be added after the pattern match. +//! +//! The `runtime` macro uses the `tt_call` to get the default pallet parts via +//! the `tt_default_parts_v2` macro defined by each pallet. The pallet parts are then returned as +//! input to the `match_and_insert` macro. +//! The `match_and_insert` macro then modifies the `runtime` to expand the implicit +//! definition to the explicit one. +//! +//! For example, +//! +//! ```ignore +//! #[frame_support::runtime] +//! mod runtime { +//! //... +//! +//! #[runtime::pallet_index(0)] +//! pub type System = frame_system; // Implicit definition of parts +//! +//! #[runtime::pallet_index(1)] +//! pub type Balances = pallet_balances; // Implicit definition of parts +//! } +//! ``` +//! This call has some implicit pallet parts, thus it will expand to: +//! ```ignore +//! frame_support::__private::tt_call! { +//! macro = [{ pallet_balances::tt_default_parts_v2 }] +//! ~~> frame_support::match_and_insert! { +//! target = [{ +//! frame_support::__private::tt_call! { +//! macro = [{ frame_system::tt_default_parts_v2 }] +//! ~~> frame_support::match_and_insert! { +//! target = [{ +//! #[frame_support::runtime] +//! mod runtime { +//! //... +//! +//! #[runtime::pallet_index(0)] +//! pub type System = frame_system; +//! +//! #[runtime::pallet_index(1)] +//! pub type Balances = pallet_balances; +//! } +//! }] +//! pattern = [{ System = frame_system }] +//! } +//! } +//! 
}] +//! pattern = [{ Balances = pallet_balances }] +//! } +//! } +//! ``` +//! `tt_default_parts_v2` must be defined. It returns the pallet parts inside some tokens, and +//! then `tt_call` will pipe the returned pallet parts into the input of `match_and_insert`. +//! Thus `match_and_insert` will initially receive the following inputs: +//! ```ignore +//! frame_support::match_and_insert! { +//! target = [{ +//! frame_support::match_and_insert! { +//! target = [{ +//! #[frame_support::runtime] +//! mod runtime { +//! //... +//! +//! #[runtime::pallet_index(0)] +//! pub type System = frame_system; +//! +//! #[runtime::pallet_index(1)] +//! pub type Balances = pallet_balances; +//! } +//! }] +//! pattern = [{ System = frame_system }] +//! tokens = [{ ::{+ Pallet + Call} }] +//! } +//! }] +//! pattern = [{ Balances = pallet_balances }] +//! tokens = [{ ::{+ Pallet + Call} }] +//! } +//! ``` +//! After dealing with `pallet_balances`, the inner `match_and_insert` will expand to: +//! ```ignore +//! frame_support::match_and_insert! { +//! target = [{ +//! #[frame_support::runtime] +//! mod runtime { +//! //... +//! +//! #[runtime::pallet_index(0)] +//! pub type System = frame_system; // Implicit definition of parts +//! +//! #[runtime::pallet_index(1)] +//! pub type Balances = pallet_balances + Pallet + Call; // Explicit definition of parts +//! } +//! }] +//! pattern = [{ System = frame_system }] +//! tokens = [{ ::{+ Pallet + Call} }] +//! } +//! ``` +//! +//! Which will then finally expand to the following: +//! ```ignore +//! #[frame_support::runtime] +//! mod runtime { +//! //... +//! +//! #[runtime::pallet_index(0)] +//! pub type System = frame_system + Pallet + Call; +//! +//! #[runtime::pallet_index(1)] +//! pub type Balances = pallet_balances + Pallet + Call; +//! } +//! ``` +//! +//! This call has no implicit pallet parts, thus it will expand to the runtime construction: +//! ```ignore +//! pub struct Runtime { ... } +//! pub struct Call { ... } +//! impl Call ... +//! pub enum Origin { ... } +//! ... +//! ``` +//! +//! Visualizing the entire flow of `#[frame_support::runtime]`, it would look like the following: +//! +//! ```ignore +//! +----------------------+ +------------------------+ +-------------------+ +//! | | | (defined in pallet) | | | +//! | runtime | --> | tt_default_parts_v2! | --> | match_and_insert! | +//! | w/ no pallet parts | | | | | +//! +----------------------+ +------------------------+ +-------------------+ +//! +//! +----------------------+ +//! | | +//! --> | runtime | +//! | w/ pallet parts | +//! +----------------------+ +//! ``` + +#![cfg(feature = "experimental")] + +pub use parse::Def; +use proc_macro::TokenStream; +use syn::spanned::Spanned; + +mod expand; +mod parse; + +mod keyword { + syn::custom_keyword!(legacy_ordering); +} + +pub fn runtime(attr: TokenStream, tokens: TokenStream) -> TokenStream { + let mut legacy_ordering = false; + if !attr.is_empty() { + if let Ok(_) = syn::parse::(attr.clone()) { + legacy_ordering = true; + } else { + let msg = "Invalid runtime macro call: unexpected attribute. 
Macro call must be \ + bare, such as `#[frame_support::runtime]` or `#[runtime]`, or must specify the \ + `legacy_ordering` attribute, such as `#[frame_support::runtime(legacy_ordering)]` or \ + #[runtime(legacy_ordering)]."; + let span = proc_macro2::TokenStream::from(attr).span(); + return syn::Error::new(span, msg).to_compile_error().into() + } + } + + let item = syn::parse_macro_input!(tokens as syn::ItemMod); + match parse::Def::try_from(item) { + Ok(def) => expand::expand(def, legacy_ordering).into(), + Err(e) => e.to_compile_error().into(), + } +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/helper.rs b/substrate/frame/support/procedural/src/runtime/parse/helper.rs new file mode 100644 index 0000000000000000000000000000000000000000..f05395f9b7abd55201cdfd49f80debac32fde9f4 --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/parse/helper.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::parse::helper::MutItemAttrs; +use quote::ToTokens; + +pub(crate) fn take_first_item_runtime_attr( + item: &mut impl MutItemAttrs, +) -> syn::Result> +where + Attr: syn::parse::Parse, +{ + let attrs = if let Some(attrs) = item.mut_item_attrs() { attrs } else { return Ok(None) }; + + if let Some(index) = attrs.iter().position(|attr| { + attr.path().segments.first().map_or(false, |segment| segment.ident == "runtime") + }) { + let runtime_attr = attrs.remove(index); + Ok(Some(syn::parse2(runtime_attr.into_token_stream())?)) + } else { + Ok(None) + } +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/mod.rs b/substrate/frame/support/procedural/src/runtime/parse/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..893cb4726e2b6016fa2e728add86990e24087c42 --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/parse/mod.rs @@ -0,0 +1,266 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod helper; +pub mod pallet; +pub mod pallet_decl; +pub mod runtime_struct; +pub mod runtime_types; + +use crate::construct_runtime::parse::Pallet; +use pallet_decl::PalletDeclaration; +use proc_macro2::TokenStream as TokenStream2; +use quote::ToTokens; +use std::collections::HashMap; +use syn::{spanned::Spanned, Ident, Token}; + +use frame_support_procedural_tools::syn_ext as ext; +use runtime_types::RuntimeType; + +mod keyword { + use syn::custom_keyword; + + custom_keyword!(runtime); + custom_keyword!(derive); + custom_keyword!(pallet_index); + custom_keyword!(disable_call); + custom_keyword!(disable_unsigned); +} + +enum RuntimeAttr { + Runtime(proc_macro2::Span), + Derive(proc_macro2::Span, Vec), + PalletIndex(proc_macro2::Span, u8), + DisableCall(proc_macro2::Span), + DisableUnsigned(proc_macro2::Span), +} + +impl RuntimeAttr { + fn span(&self) -> proc_macro2::Span { + match self { + Self::Runtime(span) => *span, + Self::Derive(span, _) => *span, + Self::PalletIndex(span, _) => *span, + Self::DisableCall(span) => *span, + Self::DisableUnsigned(span) => *span, + } + } +} + +impl syn::parse::Parse for RuntimeAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + let lookahead = content.lookahead1(); + if lookahead.peek(keyword::runtime) { + Ok(RuntimeAttr::Runtime(content.parse::()?.span())) + } else if lookahead.peek(keyword::derive) { + let _ = content.parse::(); + let derive_content; + syn::parenthesized!(derive_content in content); + let runtime_types = + derive_content.parse::>()?; + let runtime_types = runtime_types.inner.into_iter().collect(); + Ok(RuntimeAttr::Derive(derive_content.span(), runtime_types)) + } else if lookahead.peek(keyword::pallet_index) { + let _ = content.parse::(); + let pallet_index_content; + syn::parenthesized!(pallet_index_content in content); + let pallet_index = pallet_index_content.parse::()?; + if !pallet_index.suffix().is_empty() { + let msg = "Number literal must not have a suffix"; + return Err(syn::Error::new(pallet_index.span(), msg)) + } + Ok(RuntimeAttr::PalletIndex(pallet_index.span(), pallet_index.base10_parse()?)) + } else if lookahead.peek(keyword::disable_call) { + Ok(RuntimeAttr::DisableCall(content.parse::()?.span())) + } else if lookahead.peek(keyword::disable_unsigned) { + Ok(RuntimeAttr::DisableUnsigned(content.parse::()?.span())) + } else { + Err(lookahead.error()) + } + } +} + +#[derive(Debug, Clone)] +pub enum AllPalletsDeclaration { + Implicit(ImplicitAllPalletsDeclaration), + Explicit(ExplicitAllPalletsDeclaration), +} + +/// Declaration of a runtime with some pallet with implicit declaration of parts. +#[derive(Debug, Clone)] +pub struct ImplicitAllPalletsDeclaration { + pub name: Ident, + pub pallet_decls: Vec, + pub pallet_count: usize, +} + +/// Declaration of a runtime with all pallet having explicit declaration of parts. 
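As parsed above, `pallet_index` accepts only a plain, unsuffixed integer literal that fits in a `u8`. A short sketch of accepted and rejected forms:

```ignore
#[runtime::pallet_index(5)]   // ok: parsed via `base10_parse::<u8>()`
pub type Balances = pallet_balances;

#[runtime::pallet_index(5u8)] // error: "Number literal must not have a suffix"
pub type Balances = pallet_balances;
```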
+#[derive(Debug, Clone)] +pub struct ExplicitAllPalletsDeclaration { + pub name: Ident, + pub pallets: Vec, +} + +pub struct Def { + pub input: TokenStream2, + pub item: syn::ItemMod, + pub runtime_struct: runtime_struct::RuntimeStructDef, + pub pallets: AllPalletsDeclaration, + pub runtime_types: Vec, +} + +impl Def { + pub fn try_from(mut item: syn::ItemMod) -> syn::Result { + let input: TokenStream2 = item.to_token_stream().into(); + let item_span = item.span(); + let items = &mut item + .content + .as_mut() + .ok_or_else(|| { + let msg = "Invalid runtime definition, expected mod to be inlined."; + syn::Error::new(item_span, msg) + })? + .1; + + let mut runtime_struct = None; + let mut runtime_types = None; + + let mut indices = HashMap::new(); + let mut names = HashMap::new(); + + let mut pallet_decls = vec![]; + let mut pallets = vec![]; + + for item in items.iter_mut() { + let mut pallet_item = None; + let mut pallet_index = 0; + + let mut disable_call = false; + let mut disable_unsigned = false; + + while let Some(runtime_attr) = + helper::take_first_item_runtime_attr::(item)? + { + match runtime_attr { + RuntimeAttr::Runtime(span) if runtime_struct.is_none() => { + let p = runtime_struct::RuntimeStructDef::try_from(span, item)?; + runtime_struct = Some(p); + }, + RuntimeAttr::Derive(_, types) if runtime_types.is_none() => { + runtime_types = Some(types); + }, + RuntimeAttr::PalletIndex(span, index) => { + pallet_index = index; + pallet_item = if let syn::Item::Type(item) = item { + Some(item.clone()) + } else { + let msg = "Invalid runtime::pallet_index, expected type definition"; + return Err(syn::Error::new(span, msg)) + }; + }, + RuntimeAttr::DisableCall(_) => disable_call = true, + RuntimeAttr::DisableUnsigned(_) => disable_unsigned = true, + attr => { + let msg = "Invalid duplicated attribute"; + return Err(syn::Error::new(attr.span(), msg)) + }, + } + } + + if let Some(pallet_item) = pallet_item { + match *pallet_item.ty.clone() { + syn::Type::Path(ref path) => { + let pallet_decl = + PalletDeclaration::try_from(item.span(), &pallet_item, path)?; + + if let Some(used_pallet) = + names.insert(pallet_decl.name.clone(), pallet_decl.name.span()) + { + let msg = "Two pallets with the same name!"; + + let mut err = syn::Error::new(used_pallet, &msg); + err.combine(syn::Error::new(pallet_decl.name.span(), &msg)); + return Err(err) + } + + pallet_decls.push(pallet_decl); + }, + syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) => { + let pallet = Pallet::try_from( + item.span(), + &pallet_item, + pallet_index, + disable_call, + disable_unsigned, + &bounds, + )?; + + if let Some(used_pallet) = indices.insert(pallet.index, pallet.name.clone()) + { + let msg = format!( + "Pallet indices are conflicting: Both pallets {} and {} are at index {}", + used_pallet, pallet.name, pallet.index, + ); + let mut err = syn::Error::new(used_pallet.span(), &msg); + err.combine(syn::Error::new(pallet.name.span(), msg)); + return Err(err) + } + + pallets.push(pallet); + }, + _ => continue, + } + } + } + + let name = item.ident.clone(); + let decl_count = pallet_decls.len(); + let pallets = if decl_count > 0 { + AllPalletsDeclaration::Implicit(ImplicitAllPalletsDeclaration { + name, + pallet_decls, + pallet_count: decl_count.saturating_add(pallets.len()), + }) + } else { + AllPalletsDeclaration::Explicit(ExplicitAllPalletsDeclaration { name, pallets }) + }; + + let def = Def { + input, + item, + runtime_struct: runtime_struct.ok_or_else(|| { + syn::Error::new(item_span, + "Missing Runtime. 
diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d2f1857fb2b4feec841224f28d691076c8853427
--- /dev/null
+++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs
@@ -0,0 +1,99 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath};
+use quote::ToTokens;
+use syn::{punctuated::Punctuated, spanned::Spanned, token, Error, Ident, PathArguments};
+
+impl Pallet {
+	pub fn try_from(
+		attr_span: proc_macro2::Span,
+		item: &syn::ItemType,
+		pallet_index: u8,
+		disable_call: bool,
+		disable_unsigned: bool,
+		bounds: &Punctuated<syn::TypeParamBound, token::Plus>,
+	) -> syn::Result<Self> {
+		let name = item.ident.clone();
+
+		let mut pallet_path = None;
+		let mut pallet_parts = vec![];
+
+		for (index, bound) in bounds.into_iter().enumerate() {
+			if let syn::TypeParamBound::Trait(syn::TraitBound { path, .. }) = bound {
+				if index == 0 {
+					pallet_path = Some(PalletPath { inner: path.clone() });
+				} else {
+					let pallet_part = syn::parse2::<PalletPart>(bound.into_token_stream())?;
+					pallet_parts.push(pallet_part);
+				}
+			} else {
+				return Err(Error::new(
+					attr_span,
+					"Invalid pallet declaration, expected a path or a trait object",
+				))
+			};
+		}
+
+		let mut path = pallet_path.ok_or(Error::new(
+			attr_span,
+			"Invalid pallet declaration, expected a path or a trait object",
+		))?;
+
+		let mut instance = None;
+		if let Some(segment) = path.inner.segments.iter_mut().find(|seg| !seg.arguments.is_empty())
+		{
+			if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments {
+				args, ..
+			}) = segment.arguments.clone()
+			{
+				if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() {
+					instance =
+						Some(Ident::new(&arg_path.to_token_stream().to_string(), arg_path.span()));
+					segment.arguments = PathArguments::None;
+				}
+			}
+		}
+
+		pallet_parts = pallet_parts
+			.into_iter()
+			.filter(|part| {
+				if let (true, &PalletPartKeyword::Call(_)) = (disable_call, &part.keyword) {
+					false
+				} else if let (true, &PalletPartKeyword::ValidateUnsigned(_)) =
+					(disable_unsigned, &part.keyword)
+				{
+					false
+				} else {
+					true
+				}
+			})
+			.collect();
+
+		let cfg_pattern = vec![];
+
+		Ok(Pallet {
+			is_expanded: true,
+			name,
+			index: pallet_index,
+			path,
+			instance,
+			cfg_pattern,
+			pallet_parts,
+		})
+	}
+}
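In practice, the filter at the end of `Pallet::try_from` means a `Call` part listed in the declaration is dropped again when `#[runtime::disable_call]` is present, and likewise `ValidateUnsigned` under `#[runtime::disable_unsigned]`. A hypothetical declaration:

```rust
// `Call` is listed as a part, but `disable_call` strips it from `pallet_parts`,
// so the runtime exposes no dispatchables for this pallet.
#[runtime::pallet_index(2)]
#[runtime::disable_call]
pub type Sudo = pallet_sudo + Pallet + Call + Event<T>;
```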
diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs
new file mode 100644
index 0000000000000000000000000000000000000000..437a163cfbc44973e753075d4a139fca5df81af8
--- /dev/null
+++ b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs
@@ -0,0 +1,60 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use quote::ToTokens;
+use syn::{spanned::Spanned, Attribute, Ident, PathArguments};
+
+/// The declaration of a pallet.
+#[derive(Debug, Clone)]
+pub struct PalletDeclaration {
+	/// The name of the pallet, e.g. `System` in `System: frame_system`.
+	pub name: Ident,
+	/// Optional attributes tagged right above a pallet declaration.
+	pub attrs: Vec<Attribute>,
+	/// The path of the pallet, e.g. `frame_system` in `System: frame_system`.
+	pub path: syn::Path,
+	/// The instance of the pallet, e.g. `Instance1` in `Council: pallet_collective::<Instance1>`.
+	pub instance: Option<Ident>,
+}
+
+impl PalletDeclaration {
+	pub fn try_from(
+		_attr_span: proc_macro2::Span,
+		item: &syn::ItemType,
+		path: &syn::TypePath,
+	) -> syn::Result<Self> {
+		let name = item.ident.clone();
+
+		let mut path = path.path.clone();
+
+		let mut instance = None;
+		if let Some(segment) = path.segments.iter_mut().find(|seg| !seg.arguments.is_empty()) {
+			if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments {
+				args, ..
+			}) = segment.arguments.clone()
+			{
+				if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() {
+					instance =
+						Some(Ident::new(&arg_path.to_token_stream().to_string(), arg_path.span()));
+					segment.arguments = PathArguments::None;
+				}
+			}
+		}
+
+		Ok(Self { name, path, instance, attrs: item.attrs.clone() })
+	}
+}
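The instance handling here mirrors `Pallet::try_from` above: the first angle-bracketed argument of the path is lifted into `instance` and stripped from `path`. For example (hypothetical declaration):

```rust
// Parsed with `path` = `pallet_collective` and `instance` = `Instance1`.
#[runtime::pallet_index(3)]
pub type Council = pallet_collective<Instance1>;
```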
diff --git a/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs b/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8fa746ee80727d7164d5dadb0f728fc7991362f7
--- /dev/null
+++ b/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs
@@ -0,0 +1,35 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use syn::spanned::Spanned;
+pub struct RuntimeStructDef {
+	pub ident: syn::Ident,
+	pub attr_span: proc_macro2::Span,
+}
+
+impl RuntimeStructDef {
+	pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result<Self> {
+		let item = if let syn::Item::Struct(item) = item {
+			item
+		} else {
+			let msg = "Invalid runtime::runtime, expected struct definition";
+			return Err(syn::Error::new(item.span(), msg))
+		};
+
+		Ok(Self { ident: item.ident.clone(), attr_span })
+	}
+}
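For completeness, `RuntimeStructDef::try_from` only accepts a struct item; anything else (an enum, a type alias) takes the error branch above. The minimal accepted shape, under the same assumptions as the earlier sketches:

```rust
// A plain struct named by convention `Runtime`, tagged with the two attributes.
#[runtime::runtime]
#[runtime::derive(RuntimeCall, RuntimeEvent)]
pub struct Runtime;
```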
diff --git a/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs b/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a4480e2a1fd32622bea3a7f20294b4c2bee88309
--- /dev/null
+++ b/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs
@@ -0,0 +1,76 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use syn::{
+	parse::{Parse, ParseStream},
+	Result,
+};
+
+mod keyword {
+	use syn::custom_keyword;
+
+	custom_keyword!(RuntimeCall);
+	custom_keyword!(RuntimeEvent);
+	custom_keyword!(RuntimeError);
+	custom_keyword!(RuntimeOrigin);
+	custom_keyword!(RuntimeFreezeReason);
+	custom_keyword!(RuntimeHoldReason);
+	custom_keyword!(RuntimeSlashReason);
+	custom_keyword!(RuntimeLockId);
+	custom_keyword!(RuntimeTask);
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum RuntimeType {
+	RuntimeCall(keyword::RuntimeCall),
+	RuntimeEvent(keyword::RuntimeEvent),
+	RuntimeError(keyword::RuntimeError),
+	RuntimeOrigin(keyword::RuntimeOrigin),
+	RuntimeFreezeReason(keyword::RuntimeFreezeReason),
+	RuntimeHoldReason(keyword::RuntimeHoldReason),
+	RuntimeSlashReason(keyword::RuntimeSlashReason),
+	RuntimeLockId(keyword::RuntimeLockId),
+	RuntimeTask(keyword::RuntimeTask),
+}
+
+impl Parse for RuntimeType {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let lookahead = input.lookahead1();
+
+		if lookahead.peek(keyword::RuntimeCall) {
+			Ok(Self::RuntimeCall(input.parse()?))
+		} else if lookahead.peek(keyword::RuntimeEvent) {
+			Ok(Self::RuntimeEvent(input.parse()?))
+		} else if lookahead.peek(keyword::RuntimeError) {
+			Ok(Self::RuntimeError(input.parse()?))
+		} else if lookahead.peek(keyword::RuntimeOrigin) {
+			Ok(Self::RuntimeOrigin(input.parse()?))
+		} else if lookahead.peek(keyword::RuntimeFreezeReason) {
+			Ok(Self::RuntimeFreezeReason(input.parse()?))
+		} else if lookahead.peek(keyword::RuntimeHoldReason) {
+			Ok(Self::RuntimeHoldReason(input.parse()?))
+		} else if lookahead.peek(keyword::RuntimeSlashReason) {
+			Ok(Self::RuntimeSlashReason(input.parse()?))
+		} else if lookahead.peek(keyword::RuntimeLockId) {
+			Ok(Self::RuntimeLockId(input.parse()?))
+		} else if lookahead.peek(keyword::RuntimeTask) {
+			Ok(Self::RuntimeTask(input.parse()?))
+		} else {
+			Err(lookahead.error())
+		}
+	}
+}
diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml
index 9be5ca0bfd4c59f8dc9dd1a07bd81f0b393aefa4..a75307aca79b6ff241611b49b60c931ee1f83373 100644
--- a/substrate/frame/support/procedural/tools/Cargo.toml
+++ b/substrate/frame/support/procedural/tools/Cargo.toml
@@ -17,6 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 proc-macro-crate = "3.0.0"
 proc-macro2 = "1.0.56"
-quote = "1.0.28"
-syn = { version = "2.0.49", features = ["extra-traits", "full", "visit"] }
+quote = { workspace = true }
+syn = { features = ["extra-traits", "full", "visit"], workspace = true }
 frame-support-procedural-tools-derive = { path = "derive" }
diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml
index bb9935ba58ca8efd129f186b96a8d7b175e1dbee..b39d99a822fb7aed533bc7795daa53c903cc2952 100644
--- a/substrate/frame/support/procedural/tools/derive/Cargo.toml
+++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml
@@ -19,5 +19,5 @@ proc-macro = true
 [dependencies]
 proc-macro2 = "1.0.56"
-quote = { version = "1.0.28", features = ["proc-macro"] }
-syn = { version = "2.0.49", features = ["extra-traits", "full", "parsing", "proc-macro"] }
+quote = { features = ["proc-macro"], workspace = true }
+syn = { features = ["extra-traits", "full", "parsing", "proc-macro"], workspace = true }
diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs
index 8935acf4e2bb64dc0087c492e2174741cbb40d80..4eadd85a9e030dfbaabec76d4b84690b4f3d0287 100644
--- 
a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -508,6 +508,9 @@ pub use frame_support_procedural::{ construct_runtime, match_and_insert, transactional, PalletError, RuntimeDebugNoBound, }; +#[cfg(feature = "experimental")] +pub use frame_support_procedural::runtime; + #[doc(hidden)] pub use frame_support_procedural::{__create_tt_macro, __generate_dummy_part_checker}; @@ -700,7 +703,7 @@ pub use frame_support_procedural::crate_to_crate_version; #[macro_export] macro_rules! fail { ( $y:expr ) => {{ - return Err($y.into()) + return Err($y.into()); }}; } @@ -780,7 +783,7 @@ macro_rules! assert_err_with_weight { $crate::assert_err!($call.map(|_| ()).map_err(|e| e.error), $err); assert_eq!(dispatch_err_with_post.post_info.actual_weight, $weight); } else { - panic!("expected Err(_), got Ok(_).") + ::core::panic!("expected Err(_), got Ok(_).") } }; } @@ -900,18 +903,20 @@ pub mod pallet_prelude { }; pub use codec::{Decode, Encode, MaxEncodedLen}; pub use frame_support::pallet_macros::*; + /// The optional attribute `#[inject_runtime_type]` can be attached to `RuntimeCall`, /// `RuntimeEvent`, `RuntimeOrigin` or `PalletInfo` in an impl statement that has /// `#[register_default_impl]` attached to indicate that this item is generated by /// `construct_runtime`. /// /// Attaching this attribute to such an item ensures that the combined impl generated via - /// [`#[derive_impl(..)]`](`macro@super::derive_impl`) will use the correct type - /// auto-generated by `construct_runtime!`. + /// [`#[derive_impl(..)]`](`frame_support::derive_impl`) will use the correct + /// type auto-generated by + /// `construct_runtime!`. #[doc = docify::embed!("src/tests/inject_runtime_type.rs", derive_impl_works_with_runtime_type_injection)] /// /// However, if `no_aggregated_types` is specified while using - /// `[`#[derive_impl(..)]`](`macro@super::derive_impl`)`, then these items are attached + /// `[`#[derive_impl(..)]`](`frame_support::derive_impl`)`, then these items are attached /// verbatim to the combined impl. #[doc = docify::embed!("src/tests/inject_runtime_type.rs", derive_impl_works_with_no_aggregated_types)] pub use frame_support_procedural::inject_runtime_type; @@ -931,129 +936,26 @@ pub mod pallet_prelude { pub use sp_weights::Weight; } -/// The `pallet` attribute macro defines a pallet that can be used with -/// [`construct_runtime!`]. It must be attached to a module named `pallet` as follows: +/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to +/// specify pallet information. /// -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// ... -/// } +/// The struct must be defined as follows: /// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// #[pallet::pallet] // <- the macro +/// pub struct Pallet(_); // <- the struct definition /// -/// Note that various types can be automatically imported using -/// [`frame_support::pallet_prelude`] and `frame_system::pallet_prelude`: -/// -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// ... +/// #[pallet::config] +/// pub trait Config: frame_system::Config {} /// } /// ``` -/// -/// # pallet::* Attributes -/// -/// The `pallet` macro will parse any items within your `pallet` module that are annotated with -/// `#[pallet::*]` attributes. 
Some of these attributes are mandatory and some are optional, -/// and they can attach to different types of items within your pallet depending on the -/// attribute in question. The full list of `#[pallet::*]` attributes is shown below in the -/// order in which they are mentioned in this document: -/// -/// * [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) -/// * [`pallet::config`](#config-trait-palletconfig-mandatory) -/// * [`pallet::constant`](#palletconstant) -/// * [`pallet::disable_frame_system_supertrait_check`](#disable_supertrait_check) -/// * [`pallet::generate_store($vis trait Store)`](#palletgenerate_storevis-trait-store) -/// * [`pallet::storage_version`](#palletstorage_version) -/// * [`pallet::hooks`](#hooks-pallethooks-optional) -/// * [`pallet::call`](#call-palletcall-optional) -/// * [`pallet::weight($expr)`](#palletweightexpr) -/// * [`pallet::compact`](#palletcompact-some_arg-some_type) -/// * [`pallet::call_index($idx)`](#palletcall_indexidx) -/// * [`pallet::extra_constants`](#extra-constants-palletextra_constants-optional) -/// * [`pallet::error`](#error-palleterror-optional) -/// * [`pallet::event`](#event-palletevent-optional) -/// * [`pallet::generate_deposit($visibility fn -/// deposit_event)`](#palletgenerate_depositvisibility-fn-deposit_event) -/// * [`pallet::storage`](#storage-palletstorage-optional) -/// * [`pallet::getter(fn $my_getter_fn_name)`](#palletgetterfn-my_getter_fn_name-optional) -/// * [`pallet::storage_prefix = "SomeName"`](#palletstorage_prefix--somename-optional) -/// * [`pallet::unbounded`](#palletunbounded-optional) -/// * [`pallet::whitelist_storage`](#palletwhitelist_storage-optional) -/// * [`cfg(..)`](#cfg-for-storage) (on storage items) -/// * [`pallet::type_value`](#type-value-pallettype_value-optional) -/// * [`pallet::genesis_config`](#genesis-config-palletgenesis_config-optional) -/// * [`pallet::genesis_build`](#genesis-build-palletgenesis_build-optional) -/// * [`pallet::inherent`](#inherent-palletinherent-optional) -/// * [`pallet::validate_unsigned`](#validate-unsigned-palletvalidate_unsigned-optional) -/// * [`pallet::origin`](#origin-palletorigin-optional) -/// * [`pallet::composite_enum`](#composite-enum-palletcomposite_enum-optional) -/// -/// Note that at compile-time, the `#[pallet]` macro will analyze and expand all of these -/// attributes, ultimately removing their AST nodes before they can be parsed as real -/// attribute macro calls. This means that technically we do not need attribute macro -/// definitions for any of these attributes, however, for consistency and discoverability -/// reasons, we still maintain stub attribute macro definitions for all of these attributes in -/// the [`pallet_macros`] module which is automatically included in all pallets as part of the -/// pallet prelude. The actual "work" for all of these attribute macros can be found in the -/// macro expansion for `#[pallet]`. -/// -/// Also note that in this document, pallet attributes are explained using the syntax of -/// non-instantiable pallets. For an example of an instantiable pallet, see [this -/// example](#example-of-an-instantiable-pallet). -/// -/// # Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` on the `#[pallet]` or `#[frame_support::pallet]` -/// attribute attached to your pallet module will allow you to enable dev mode for a pallet. 
-/// The aim of dev mode is to loosen some of the restrictions and requirements placed on -/// production pallets for easy tinkering and development. Dev mode pallets should not be used -/// in production. Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not -/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not -/// specify a weight. -/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a call index based on the order of the call. -/// * All storages are marked as unbounded, meaning you do not need to implement -/// `MaxEncodedLen` on storage types. This is equivalent to specifying `#[pallet::unbounded]` -/// on all storage type definitions. -/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, -/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` -/// can simply be ignored when in `dev_mode`. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument -/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]` -/// attribute macro. -/// -///
-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
-/// and therefore should never be done. Once you are done tinkering, you should remove the
-/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
-/// attempting to use your pallet in a production scenario.
-/// 
-///
-/// # Pallet struct placeholder: `#[pallet::pallet]` (mandatory)
-///
-/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify
-/// pallet information.
-///
-/// The struct must be defined as follows:
-/// ```ignore
-/// #[pallet::pallet]
-/// pub struct Pallet(_);
-/// ```
+///
+/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause.
 ///
 /// ## Macro expansion:
 ///
-/// The macro adds this attribute to the struct definition:
+/// The macro adds this attribute to the Pallet struct definition:
 /// ```ignore
 /// #[derive(
 /// 	frame_support::CloneNoBound,
@@ -1062,1225 +964,926 @@ pub mod pallet_prelude {
 /// 	frame_support::RuntimeDebugNoBound,
 /// )]
 /// ```
-/// and replaces the type `_` with `PhantomData<T>`. It also implements on the pallet:
-/// * [`GetStorageVersion`](`traits::GetStorageVersion`)
-/// * [`OnGenesis`](`traits::OnGenesis`): contains some logic to write the pallet version into
-/// storage.
-/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined.
-///
-/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`.
-///
-/// It implements [`PalletInfoAccess`](`traits::PalletInfoAccess') on `Pallet` to ease access
-/// to pallet information given by [`frame_support::traits::PalletInfo`]. (The implementation
-/// uses the associated type `frame_system::Config::PalletInfo`).
+/// and replaces the type `_` with `PhantomData<T>`.
-/// It implements [`StorageInfoTrait`](`traits::StorageInfoTrait`) on `Pallet` which give
-/// information about all storages.
+/// It also implements the following traits on the pallet:
-/// If the attribute `generate_store` is set then the macro creates the trait `Store` and
-/// implements it on `Pallet`.
+/// * [`GetStorageVersion`](frame_support::traits::GetStorageVersion)
+/// * [`OnGenesis`](frame_support::traits::OnGenesis): contains some logic to write the pallet
+/// version into storage.
+/// * [`PalletInfoAccess`](frame_support::traits::PalletInfoAccess) to ease access to pallet
+/// information given by [`frame_support::traits::PalletInfo`]. (The implementation uses the
+/// associated type [`frame_support::traits::PalletInfo`]).
+/// * [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) to give information about
+/// storages.
 ///
 /// If the attribute `set_storage_max_encoded_len` is set then the macro calls
-/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for each storage in the implementation of
-/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet. Otherwise it implements
-/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet using the
-/// [`PartialStorageInfoTrait`](`traits::PartialStorageInfoTrait`) implementation of storages.
-///
-/// # Config trait: `#[pallet::config]` (mandatory)
-///
-/// The mandatory attribute `#[pallet::config]` defines the configurable options for the
-/// pallet.
-///
-/// Item must be defined as:
-///
-/// ```ignore
-/// #[pallet::config]
-/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits
-/// $optional_where_clause
-/// {
-/// ...
-/// }
-/// ```
-///
-/// I.e. a regular trait definition named `Config`, with the supertrait
-/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause.
-/// (Specifying other supertraits here is known as [tight -/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) -/// -/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds -/// `From` and `IsType<::RuntimeEvent>`. -/// -/// [`pallet::event`](`frame_support::pallet_macros::event`) must be present if `RuntimeEvent` -/// exists as a config item in your `#[pallet::config]`. -/// -/// Also see [`pallet::config`](`frame_support::pallet_macros::config`) -/// -/// ## `pallet::constant` -/// -/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by -/// [`Get`](crate::traits::Get) from [`pallet::config`](#palletconfig) into metadata, e.g.: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; -/// } -/// ``` -/// -/// Also see [`pallet::constant`](`frame_support::pallet_macros::constant`) -/// -/// ## `pallet::disable_frame_system_supertrait_check` -/// -/// -/// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `pallet::disable_frame_system_supertrait_check`, e.g.: -/// -/// ```ignore -/// #[pallet::config] -/// #[pallet::disable_frame_system_supertrait_check] -/// pub trait Config: pallet_timestamp::Config {} -/// ``` -/// -/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you -/// want to write an alternative to the `frame_system` pallet. -/// -/// Also see -/// [`pallet::disable_frame_system_supertrait_check`](`frame_support::pallet_macros::disable_frame_system_supertrait_check`) -/// -/// ## Macro expansion: -/// -/// The macro expands pallet constant metadata with the information given by -/// `#[pallet::constant]`. -/// -/// # `pallet::generate_store($vis trait Store)` -/// -/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with -/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: -/// -/// ```ignore -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// ``` -/// More precisely, the `Store` trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing access to the storage from pallet struct. -/// -/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using -/// `::Foo`. -/// -/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct -/// definition. -/// -/// Also see [`pallet::generate_store`](`frame_support::pallet_macros::generate_store`). -/// -/// # `pallet::storage_version` -/// -/// Because the [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) macro -/// implements [`traits::GetStorageVersion`], the current storage version needs to be -/// communicated to the macro. This can be done by using the `pallet::storage_version` -/// attribute: -/// -/// ```ignore -/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); -/// -/// #[pallet::pallet] -/// #[pallet::storage_version(STORAGE_VERSION)] -/// pub struct Pallet(_); -/// ``` -/// -/// If not present, the current storage version is set to the default value. 
-/// -/// Also see [`pallet::storage_version`](`frame_support::pallet_macros::storage_version`) -/// -/// # Hooks: `#[pallet::hooks]` (optional) -/// -/// The `pallet::hooks` attribute allows you to specify a `Hooks` implementation for `Pallet` -/// that specifies pallet-specific logic. -/// -/// The item the attribute attaches to must be defined as follows: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet $optional_where_clause { -/// ... -/// } -/// ``` -/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` and -/// with an optional where clause. -/// -/// If no `#[pallet::hooks]` exists, then the following default implementation is -/// automatically generated: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet {} -/// ``` -/// -/// Also see [`pallet::hooks`](`frame_support::pallet_macros::hooks`) -/// -/// # Call: `#[pallet::call]` (optional) -/// -/// Implementation of pallet dispatchables. -/// -/// Item must be defined as: -/// ```ignore -/// #[pallet::call] -/// impl Pallet { -/// /// $some_doc -/// #[pallet::weight($ExpressionResultingInWeight)] -/// pub fn $fn_name( -/// origin: OriginFor, -/// $some_arg: $some_type, -/// // or with compact attribute: #[pallet::compact] $some_arg: $some_type, -/// ... -/// ) -> DispatchResultWithPostInfo { // or `-> DispatchResult` -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular type implementation, with generic `T: Config`, on type `Pallet`, with -/// an optional where clause. -/// -/// ## `#[pallet::weight($expr)]` -/// -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the -/// first argument must be `origin: OriginFor`. -/// -/// Also see [`pallet::weight`](`frame_support::pallet_macros::weight`) -/// -/// ### `#[pallet::compact] $some_arg: $some_type` -/// -/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must -/// return a `DispatchResultWithPostInfo` or `DispatchResult`. -/// -/// Also see [`pallet::compact`](`frame_support::pallet_macros::compact`) -/// -/// ## `#[pallet::call_index($idx)]` -/// -/// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, -/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. -/// -/// All call indexes start from 0, until it encounters a dispatchable function with a defined -/// call index. The dispatchable function that lexically follows the function with a defined -/// call index will have that call index, but incremented by 1, e.g. if there are 3 -/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` -/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. -/// -/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be -/// done with care. Indeed this will change the outer runtime call type (which is an enum with -/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in -/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the -/// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so -/// that the `Call` enum encoding does not change after modification. As a general rule of -/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain -/// the existing order of calls. 
-/// -/// Also see [`pallet::call_index`](`frame_support::pallet_macros::call_index`) -/// -/// # Extra constants: `#[pallet::extra_constants]` (optional) -/// -/// Allows you to define some extra constants to be added into constant metadata. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::extra_constants] -/// impl Pallet where $optional_where_clause { -/// /// $some_doc -/// $vis fn $fn_name() -> $some_return_type { -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, -/// 0 generics, and some return type. -/// -/// ## Macro expansion -/// -/// The macro add some extra constants to pallet constant metadata. -/// -/// Also see: [`pallet::extra_constants`](`frame_support::pallet_macros::extra_constants`) -/// -/// # Error: `#[pallet::error]` (optional) -/// -/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned -/// from the dispatchable when an error occurs. The information for this error type is then -/// stored in metadata. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::error] -/// pub enum Error { -/// /// $some_optional_doc -/// $SomeFieldLessVariant, -/// /// $some_more_optional_doc -/// $SomeVariantWithOneField(FieldType), -/// ... -/// } -/// ``` -/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field -/// variants. -/// -/// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to be -/// properly used in the metadata, and its encoded size should be as small as possible, -/// preferably 1 byte in size in order to reduce storage size. The error enum itself has an -/// absolute maximum encoded size specified by [`MAX_MODULE_ERROR_ENCODED_SIZE`]. -/// -/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to -/// diagnose problems and give a better experience to the user. Don't skimp on having lots of -/// individual error conditions.) -/// -/// Field types in enum variants must also implement [`PalletError`](traits::PalletError), -/// otherwise the pallet will fail to compile. Rust primitive types have already implemented -/// the [`PalletError`](traits::PalletError) trait along with some commonly used stdlib types -/// such as [`Option`] and -/// [`PhantomData`](`frame_support::__private::sp_std::marker::PhantomData`), and hence in most -/// use cases, a manual implementation is not necessary and is discouraged. -/// -/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, -/// bounds and/or a where clause should not needed for any use-case. -/// -/// Also see: [`pallet::error`](`frame_support::pallet_macros::error`) -/// -/// # Event: `#[pallet::event]` (optional) -/// -/// Allows you to define pallet events. Pallet events are stored under the `system` / `events` -/// key when the block is applied (and then replaced when the next block writes it's events). -/// -/// The Event enum must be defined as follows: -/// -/// ```ignore -/// #[pallet::event] -/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional -/// pub enum Event<$some_generic> $optional_where_clause { -/// /// Some doc -/// $SomeName($SomeType, $YetanotherType, ...), -/// ... -/// } -/// ``` -/// -/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or -/// `T` or `T: Config`, and optional w here clause. 
-/// -/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`Encode`], [`Decode`], and -/// [`Debug`] (on std only). For ease of use, bound by the trait -/// [`Member`](`frame_support::pallet_prelude::Member`), available in -/// frame_support::pallet_prelude. -/// -/// Also see [`pallet::event`](`frame_support::pallet_macros::event`) -/// -/// ## `#[pallet::generate_deposit($visibility fn deposit_event)]` -/// -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a -/// helper function on `Pallet` that handles deposit events. -/// -/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. -/// -/// Also see [`pallet::generate_deposit`](`frame_support::pallet_macros::generate_deposit`) -/// -/// # Storage: `#[pallet::storage]` (optional) -/// -/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime -/// storage and also set its metadata. This attribute can be used multiple times. -/// -/// Item should be defined as: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; -/// ``` -/// -/// or with unnamed generic: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<_, $some_generics, ...>; -/// ``` -/// -/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be -/// one of [`StorageValue`](`pallet_prelude::StorageValue`), -/// [`StorageMap`](`pallet_prelude::StorageMap`) or -/// [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`). The generic arguments of the -/// storage type can be given in two manners: named and unnamed. For named generic arguments, -/// the name for each argument should match the name defined for it on the storage struct: -/// * [`StorageValue`](`pallet_prelude::StorageValue`) expects `Value` and optionally -/// `QueryKind` and `OnEmpty`, -/// * [`StorageMap`](`pallet_prelude::StorageMap`) expects `Hasher`, `Key`, `Value` and -/// optionally `QueryKind` and `OnEmpty`, -/// * [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) expects `Hasher`, `Key`, -/// `Value` and optionally `QueryKind` and `OnEmpty`, -/// * [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`) expects `Hasher1`, `Key1`, -/// `Hasher2`, `Key2`, `Value` and optionally `QueryKind` and `OnEmpty`. -/// -/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the -/// macro and other generic must declared as a normal generic type declaration. -/// -/// The `Prefix` generic written by the macro is generated using -/// `PalletInfo::name::>()` and the name of the storage type. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: -/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. -/// -/// For the [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) variant, the `Prefix` -/// also implements -/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`). -/// It also associates a [`CounterPrefix`](`pallet_prelude::CounterPrefix'), which is -/// implemented the same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. 
-/// if runtime names the pallet "MyExample" then the storage `type Foo = -/// CountedStorageaMap<...>` will store its counter at the prefix: `Twox128(b"MyExample") ++ -/// Twox128(b"CounterForFoo")`. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ -/// Twox128(b"OtherName")`. -/// -/// Also see [`pallet::storage`](`frame_support::pallet_macros::storage`) -/// -/// ## `#[pallet::getter(fn $my_getter_fn_name)]` (optional) -/// -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a -/// getter function on `Pallet`. -/// -/// Also see [`pallet::getter`](`frame_support::pallet_macros::getter`) -/// -/// ## `#[pallet::storage_prefix = "SomeName"]` (optional) -/// -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the -/// storage prefix to use, see how `Prefix` generic is implemented above. This is helpful if -/// you wish to rename the storage field but don't want to perform a migration. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::storage_prefix = "foo"] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// or -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; -/// ``` -/// -/// Also see [`pallet::storage_prefix`](`frame_support::pallet_macros::storage_prefix`) -/// -/// ## `#[pallet::unbounded]` (optional) -/// -/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When -/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on -/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This -/// can be useful for storage which can never go into PoV (Proof of Validity). -/// -/// Also see [`pallet::unbounded`](`frame_support::pallet_macros::unbounded`) -/// -/// ## `#[pallet::whitelist_storage]` (optional) -/// -/// The optional attribute `#[pallet::whitelist_storage]` will declare the storage as -/// whitelisted from benchmarking. -/// -/// See -/// [`pallet::whitelist_storage`](frame_support::pallet_macros::whitelist_storage) -/// for more info. -/// -/// ## `#[cfg(..)]` (for storage) -/// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. -/// -/// E.g: -/// -/// ```ignore -/// #[cfg(feature = "my-feature")] -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageValue; -/// ``` -/// -/// All the `cfg` attributes are automatically copied to the items generated for the storage, -/// i.e. the getter, storage prefix, and the metadata element etc. -/// -/// Any type placed as the `QueryKind` parameter must implement -/// [`frame_support::storage::types::QueryKindTrait`]. There are 3 implementations of this -/// trait by default: -/// -/// 1. [`OptionQuery`](`frame_support::storage::types::OptionQuery`), the default `QueryKind` -/// used when this type parameter is omitted. Specifying this as the `QueryKind` would cause -/// storage map APIs that return a `QueryKind` to instead return an [`Option`], returning -/// `Some` when a value does exist under a specified storage key, and `None` otherwise. -/// 2. [`ValueQuery`](`frame_support::storage::types::ValueQuery`) causes storage map APIs that -/// return a `QueryKind` to instead return the value type. 
In cases where a value does not -/// exist under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is -/// used to return an appropriate value. -/// 3. [`ResultQuery`](`frame_support::storage::types::ResultQuery`) causes storage map APIs -/// that return a `QueryKind` to instead return a `Result`, with `T` being the value -/// type and `E` being the pallet error type specified by the `#[pallet::error]` attribute. -/// In cases where a value does not exist under a specified storage key, an `Err` with the -/// specified pallet error variant is returned. -/// -/// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some -/// type alias then the generation of the getter might fail. In this case the getter can be -/// implemented manually. -/// -/// NOTE: The generic `Hasher` must implement the [`StorageHasher`] trait (or the type is not -/// usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the -/// storage item. Thus generic hasher is supported. -/// -/// ## Macro expansion -/// -/// For each storage item the macro generates a struct named -/// `_GeneratedPrefixForStorage$NameOfStorage`, and implements -/// [`StorageInstance`](traits::StorageInstance) on it using the pallet and storage name. It -/// then uses it as the first generic of the aliased type. For -/// [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`), -/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`) -/// is implemented, and another similar struct is generated. -/// -/// For a named generic, the macro will reorder the generics, and remove the names. -/// -/// The macro implements the function `storage_metadata` on the `Pallet` implementing the -/// metadata for all storage items based on their kind: -/// * for a storage value, the type of the value is copied into the metadata -/// * for a storage map, the type of the values and the key's type is copied into the metadata -/// * for a storage double map, the type of the values, and the types of `key1` and `key2` are -/// copied into the metadata. -/// -/// # Type value: `#[pallet::type_value]` (optional) -/// -/// The `#[pallet::type_value]` attribute lets you define a struct implementing the -/// [`Get`](crate::traits::Get) trait to ease use of storage types. This attribute is meant to -/// be used alongside [`#[pallet::storage]`](#storage-palletstorage-optional) to define a -/// storage's default value. This attribute can be used multiple times. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } -/// ``` -/// -/// I.e.: a function definition with generics none or `T: Config` and a returned type. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn MyDefault() -> T::Balance { 3.into() } -/// ``` -/// -/// Also see [`pallet::type_value`](`frame_support::pallet_macros::type_value`) -/// -/// # Genesis config: `#[pallet::genesis_config]` (optional) +/// [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for each storage in the +/// implementation of [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for the +/// pallet. Otherwise it implements +/// [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for the pallet using the +/// [`PartialStorageInfoTrait`](frame_support::traits::PartialStorageInfoTrait) +/// implementation of storages. 
/// -/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration -/// for the pallet. +/// ## Dev Mode (`#[pallet(dev_mode)]`) /// -/// Item is defined as either an enum or a struct. It needs to be public and implement the -/// trait [`BuildGenesisConfig`](`traits::BuildGenesisConfig`) with -/// [`#[pallet::genesis_build]`](#genesis-build-palletgenesis_build-optional). The type -/// generics are constrained to be either none, or `T` or `T: Config`. +/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The +/// aim of dev mode is to loosen some of the restrictions and requirements placed on +/// production pallets for easy tinkering and development. Dev mode pallets should not be +/// used in production. Enabling dev mode has the following effects: /// -/// E.g: -/// -/// ```ignore -/// #[pallet::genesis_config] -/// pub struct GenesisConfig { -/// _myfield: BalanceOf, -/// } -/// ``` -/// -/// Also see [`pallet::genesis_config`](`frame_support::pallet_macros::genesis_config`) -/// -/// # Genesis build: `#[pallet::genesis_build]` (optional) -/// -/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` -/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the -/// pallet's initial state. -/// -/// The impl must be defined as: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig<$maybe_generics> { -/// fn build(&self) { $expr } -/// } -/// ``` -/// -/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on -/// type `GenesisConfig` with generics none or `T`. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { -/// fn build(&self) {} -/// } -/// ``` -/// -/// Also see [`pallet::genesis_build`](`frame_support::pallet_macros::genesis_build`) -/// -/// # Inherent: `#[pallet::inherent]` (optional) -/// -/// The `#[pallet::inherent]` attribute allows the pallet to provide some -/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). -/// An inherent is some piece of data that is inserted by a block authoring node at block -/// creation time and can either be accepted or rejected by validators based on whether the -/// data falls within an acceptable range. -/// -/// The most common inherent is the `timestamp` that is inserted into every block. Since there -/// is no way to validate timestamps, validators simply check that the timestamp reported by -/// the block authoring node falls within an acceptable range. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) for type `Pallet`, and some -/// optional where clause. -/// -/// Also see [`pallet::inherent`](`frame_support::pallet_macros::inherent`) -/// -/// # Validate unsigned: `#[pallet::validate_unsigned]` (optional) -/// -/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned -/// transaction: -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. 
a trait implementation with bound `T: Config`, of trait -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) for type `Pallet`, and some -/// optional where clause. -/// -/// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used to -/// add some specific logic for transaction validation. -/// -/// Also see [`pallet::validate_unsigned`](`frame_support::pallet_macros::validate_unsigned`) -/// -/// # Origin: `#[pallet::origin]` (optional) -/// -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. -/// -/// Item must be either a type alias, an enum, or a struct. It needs to be public. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T)>); -/// ``` -/// -/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care -/// as it might require some migration. -/// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. -/// -/// Also see [`pallet::origin`](`frame_support::pallet_macros::origin`) -/// -/// # Composite enum `#[pallet::composite_enum]` (optional) -/// -/// The `#[pallet::composite_enum]` attribute allows you to define an enum on the pallet which -/// will then instruct `construct_runtime` to amalgamate all similarly-named enums from other -/// pallets into an aggregate enum. This is similar in principle with how the aggregate enum is -/// generated for `#[pallet::event]` or `#[pallet::error]`. -/// -/// The item tagged with `#[pallet::composite_enum]` MUST be an enum declaration, and can ONLY -/// be the following identifiers: `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. -/// Custom identifiers are not supported. -/// -/// NOTE: For ease of usage, when no `#[derive]` attributes are detected, the -/// `#[pallet::composite_enum]` attribute will automatically derive the following traits for -/// the enum: -/// -/// ```ignore -/// Copy, Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebug -/// ``` -/// -/// The inverse is also true: if there are any #[derive] attributes present for the enum, then -/// the attribute will not automatically derive any of the traits described above. -/// -/// # General notes on instantiable pallets -/// -/// An instantiable pallet is one where Config is generic, i.e. `Config`. This allows -/// runtime to implement multiple instances of the pallet, by using different types for the -/// generic. This is the sole purpose of the generic `I`, but because -/// [`PalletInfo`](`traits::PalletInfo`) requires the `Pallet` placeholder to be static, it is -/// important to bound by `'static` whenever [`PalletInfo`](`traits::PalletInfo`) can be used. -/// Additionally, in order to make an instantiable pallet usable as a regular pallet without an -/// instance, it is important to bound by `= ()` on every type. -/// -/// Thus impl bound looks like `impl, I: 'static>`, and types look like -/// `SomeType` or `SomeType, I: 'static = ()>`. -/// -/// # Example of a non-instantiable pallet -/// -/// ``` -/// pub use pallet::*; // reexport in crate namespace for `construct_runtime!` -/// -/// #[frame_support::pallet] -/// // NOTE: The name of the pallet is provided by `construct_runtime` and is used as -/// // the unique identifier for the pallet's storage. It is not defined in the pallet itself. 
-/// pub mod pallet { -/// use frame_support::pallet_prelude::*; // Import various types used in the pallet definition -/// use frame_system::pallet_prelude::*; // Import some system helper types. -/// -/// type BalanceOf = ::Balance; -/// -/// // Define the generic parameter of the pallet -/// // The macro parses `#[pallet::constant]` attributes and uses them to generate metadata -/// // for the pallet's constants. -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] // put the constant in metadata -/// type MyGetParam: Get; -/// type Balance: Parameter + MaxEncodedLen + From; -/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; -/// } -/// -/// // Define some additional constant to put into the constant metadata. -/// #[pallet::extra_constants] -/// impl Pallet { -/// /// Some description -/// fn exra_constant_name() -> u128 { 4u128 } -/// } -/// -/// // Define the pallet struct placeholder, various pallet function are implemented on it. -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// -/// // Implement the pallet hooks. -/// #[pallet::hooks] -/// impl Hooks> for Pallet { -/// fn on_initialize(_n: BlockNumberFor) -> Weight { -/// unimplemented!(); -/// } -/// -/// // can implement also: on_finalize, on_runtime_upgrade, offchain_worker, ... -/// // see `Hooks` trait -/// } -/// -/// // Declare Call struct and implement dispatchables. -/// // -/// // WARNING: Each parameter used in functions must implement: Clone, Debug, Eq, PartialEq, -/// // Codec. -/// // -/// // The macro parses `#[pallet::compact]` attributes on function arguments and implements -/// // the `Call` encoding/decoding accordingly. -/// #[pallet::call] -/// impl Pallet { -/// /// Doc comment put in metadata -/// #[pallet::weight(0)] // Defines weight for call (function parameters are in scope) -/// pub fn toto( -/// origin: OriginFor, -/// #[pallet::compact] _foo: u32, -/// ) -> DispatchResultWithPostInfo { -/// let _ = origin; -/// unimplemented!(); -/// } -/// } -/// -/// // Declare the pallet `Error` enum (this is optional). -/// // The macro generates error metadata using the doc comment on each variant. -/// #[pallet::error] -/// pub enum Error { -/// /// doc comment put into metadata -/// InsufficientProposersBalance, -/// } -/// -/// // Declare pallet Event enum (this is optional). -/// // -/// // WARNING: Each type used in variants must implement: Clone, Debug, Eq, PartialEq, Codec. -/// // -/// // The macro generates event metadata, and derive Clone, Debug, Eq, PartialEq and Codec -/// #[pallet::event] -/// // Generate a funciton on Pallet to deposit an event. -/// #[pallet::generate_deposit(pub(super) fn deposit_event)] -/// pub enum Event { -/// /// doc comment put in metadata -/// // `::AccountId` is not defined in metadata list, the last -/// // Thus the metadata is `::AccountId`. -/// Proposed(::AccountId), -/// /// doc -/// // here metadata will be `Balance` as define in metadata list -/// Spending(BalanceOf), -/// // here metadata will be `Other` as define in metadata list -/// Something(u32), -/// } -/// -/// // Define a struct which implements `frame_support::traits::Get` (optional). -/// #[pallet::type_value] -/// pub(super) fn MyDefault() -> T::Balance { 3.into() } -/// -/// // Declare a storage item. Any amount of storage items can be declared (optional). -/// // -/// // Is expected either `StorageValue`, `StorageMap` or `StorageDoubleMap`. 
-/// // The macro generates the prefix type and replaces the first generic `_`. -/// // -/// // The macro expands the metadata for the storage item with the type used: -/// // * for a storage value the type of the value is copied into the metadata -/// // * for a storage map the type of the values and the type of the key is copied into the metadata -/// // * for a storage double map the types of the values and keys are copied into the -/// // metadata. -/// // -/// // NOTE: The generic `Hasher` must implement the `StorageHasher` trait (or the type is not -/// // usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the -/// // storage item. Thus generic hasher is supported. -/// #[pallet::storage] -/// pub(super) type MyStorageValue = -/// StorageValue>; -/// -/// // Another storage declaration -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// #[pallet::storage_prefix = "SomeOtherName"] -/// pub(super) type MyStorage = -/// StorageMap; -/// -/// // Declare the genesis config (optional). -/// // -/// // The macro accepts either a struct or an enum; it checks that generics are consistent. -/// // -/// // Type must implement the `Default` trait. -/// #[pallet::genesis_config] -/// #[derive(frame_support::DefaultNoBound)] -/// pub struct GenesisConfig { -/// _config: sp_std::marker::PhantomData, -/// _myfield: u32, -/// } -/// -/// // Declare genesis builder. (This is need only if GenesisConfig is declared) -/// #[pallet::genesis_build] -/// impl BuildGenesisConfig for GenesisConfig { -/// fn build(&self) {} -/// } -/// -/// // Declare a pallet origin (this is optional). -/// // -/// // The macro accept type alias or struct or enum, it checks generics are consistent. -/// #[pallet::origin] -/// pub struct Origin(PhantomData); -/// -/// // Declare a hold reason (this is optional). -/// // -/// // Creates a hold reason for this pallet that is aggregated by `construct_runtime`. -/// // A similar enum can be defined for `FreezeReason`, `LockId` or `SlashReason`. -/// #[pallet::composite_enum] -/// pub enum HoldReason { -/// SomeHoldReason -/// } -/// -/// // Declare validate_unsigned implementation (this is optional). -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// type Call = Call; -/// fn validate_unsigned( -/// source: TransactionSource, -/// call: &Self::Call -/// ) -> TransactionValidity { -/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) -/// } -/// } -/// -/// // Declare inherent provider for pallet (this is optional). 
-/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// type Call = Call; -/// type Error = InherentError; -/// -/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; -/// -/// fn create_inherent(_data: &InherentData) -> Option { -/// unimplemented!(); -/// } -/// -/// fn is_inherent(_call: &Self::Call) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// // Regular rust code needed for implementing ProvideInherent trait -/// -/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] -/// #[cfg_attr(feature = "std", derive(codec::Decode))] -/// pub enum InherentError { -/// } -/// -/// impl sp_inherents::IsFatalError for InherentError { -/// fn is_fatal_error(&self) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; -/// } -/// ``` -/// -/// # Example of an instantiable pallet -/// -/// ``` -/// pub use pallet::*; -/// -/// #[frame_support::pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// -/// type BalanceOf = >::Balance; +/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not +/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not +/// specify a weight. +/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a call index based on the order of the call. +/// * All storages are marked as unbounded, meaning you do not need to implement +/// [`MaxEncodedLen`](frame_support::pallet_prelude::MaxEncodedLen) on storage types. This is +/// equivalent to specifying `#[pallet::unbounded]` on all storage type definitions. +/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, +/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` +/// can simply be ignored when in `dev_mode`. 
/// -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type MyGetParam: Get; -/// type Balance: Parameter + MaxEncodedLen + From; -/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; -/// } -/// -/// #[pallet::extra_constants] -/// impl, I: 'static> Pallet { -/// /// Some description -/// fn extra_constant_name() -> u128 { 4u128 } -/// } -/// -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(PhantomData<(T, I)>); -/// -/// #[pallet::hooks] -/// impl, I: 'static> Hooks> for Pallet { -/// } -/// -/// #[pallet::call] -/// impl, I: 'static> Pallet { -/// /// Doc comment put in metadata -/// #[pallet::weight(0)] -/// pub fn toto(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { -/// let _ = origin; -/// unimplemented!(); -/// } -/// } -/// -/// #[pallet::error] -/// pub enum Error { -/// /// doc comment put into metadata -/// InsufficientProposersBalance, -/// } -/// -/// #[pallet::event] -/// #[pallet::generate_deposit(pub(super) fn deposit_event)] -/// pub enum Event, I: 'static = ()> { -/// /// doc comment put in metadata -/// Proposed(::AccountId), -/// /// doc -/// Spending(BalanceOf), -/// Something(u32), -/// } -/// -/// #[pallet::type_value] -/// pub(super) fn MyDefault, I: 'static>() -> T::Balance { 3.into() } -/// -/// #[pallet::storage] -/// pub(super) type MyStorageValue, I: 'static = ()> = -/// StorageValue>; -/// -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// #[pallet::storage_prefix = "SomeOtherName"] -/// pub(super) type MyStorage = -/// StorageMap; -/// -/// #[pallet::genesis_config] -/// #[derive(frame_support::DefaultNoBound)] -/// pub struct GenesisConfig, I: 'static = ()> { -/// _config: sp_std::marker::PhantomData<(T,I)>, -/// _myfield: u32, -/// } -/// -/// #[pallet::genesis_build] -/// impl, I: 'static> BuildGenesisConfig for GenesisConfig { -/// fn build(&self) {} -/// } -/// -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T, I)>); -/// -/// #[pallet::composite_enum] -/// pub enum HoldReason { -/// SomeHoldReason -/// } -/// -/// #[pallet::validate_unsigned] -/// impl, I: 'static> ValidateUnsigned for Pallet { -/// type Call = Call; -/// fn validate_unsigned( -/// source: TransactionSource, -/// call: &Self::Call -/// ) -> TransactionValidity { -/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) -/// } -/// } -/// -/// #[pallet::inherent] -/// impl, I: 'static> ProvideInherent for Pallet { -/// type Call = Call; -/// type Error = InherentError; -/// -/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; -/// -/// fn create_inherent(_data: &InherentData) -> Option { -/// unimplemented!(); -/// } -/// -/// fn is_inherent(_call: &Self::Call) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// // Regular rust code needed for implementing ProvideInherent trait -/// -/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] -/// #[cfg_attr(feature = "std", derive(codec::Decode))] -/// pub enum InherentError { -/// } -/// -/// impl sp_inherents::IsFatalError for InherentError { -/// fn is_fatal_error(&self) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; -/// } -/// ``` +/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or +/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. 
This +/// argument cannot be specified anywhere else, including but not limited to the +/// `#[pallet::pallet]` attribute macro. /// -/// # Upgrade guidelines -/// -/// 1. Export the metadata of the pallet for later checks -/// - run your node with the pallet active -/// - query the metadata using the `state_getMetadata` RPC and curl, or use `subsee -p -/// > meta.json` -/// 2. Generate the template upgrade for the pallet provided by `decl_storage` with the -/// environment variable `PRINT_PALLET_UPGRADE`: `PRINT_PALLET_UPGRADE=1 cargo check -p -/// my_pallet`. This template can be used as it contains all information for storages, -/// genesis config and genesis build. -/// 3. Reorganize the pallet to have the trait `Config`, `decl_*` macros, -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`), -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`), and Origin` all together in one -/// file. Suggested order: -/// * `Config`, -/// * `decl_module`, -/// * `decl_event`, -/// * `decl_error`, -/// * `decl_storage`, -/// * `origin`, -/// * `validate_unsigned`, -/// * `provide_inherent`, so far it should compile and all be correct. -/// 4. start writing the new pallet module -/// ```ignore -/// pub use pallet::*; -/// -/// #[frame_support::pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// use super::*; -/// -/// #[pallet::pallet] -/// #[pallet::generate_store($visibility_of_trait_store trait Store)] -/// // NOTE: if the visibility of trait store is private but you want to make it available -/// // in super, then use `pub(super)` or `pub(crate)` to make it available in crate. -/// pub struct Pallet(_); -/// // pub struct Pallet(PhantomData); // for instantiable pallet -/// } -/// ``` -/// 5. **migrate Config**: move trait into the module with -/// * all const in `decl_module` to [`#[pallet::constant]`](#palletconstant) -/// * add the bound `IsType<::RuntimeEvent>` to `type -/// RuntimeEvent` -/// 7. **migrate decl_module**: write: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks for Pallet { -/// } -/// ``` -/// and write inside `on_initialize`, `on_finalize`, `on_runtime_upgrade`, -/// `offchain_worker`, and `integrity_test`. -/// -/// then write: -/// ```ignore -/// #[pallet::call] -/// impl Pallet { -/// } -/// ``` -/// and write inside all the calls in `decl_module` with a few changes in the signature: -/// - origin must now be written completely, e.g. `origin: OriginFor` -/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you -/// might need to put `Ok(().into())` at the end or the function. -/// - `#[compact]` must now be written -/// [`#[pallet::compact]`](#palletcompact-some_arg-some_type) -/// - `#[weight = ..]` must now be written [`#[pallet::weight(..)]`](#palletweightexpr) -/// -/// 7. **migrate event**: rewrite as a simple enum with the attribute -/// [`#[pallet::event]`](#event-palletevent-optional), use [`#[pallet::generate_deposit($vis -/// fn deposit_event)]`](#event-palletevent-optional) to generate `deposit_event`, -/// 8. **migrate error**: rewrite it with attribute -/// [`#[pallet::error]`](#error-palleterror-optional). -/// 9. **migrate storage**: `decl_storage` provide an upgrade template (see 3.). All storages, -/// genesis config, genesis build and default implementation of genesis config can be taken -/// from it directly. -/// -/// Otherwise here is the manual process: -/// -/// first migrate the genesis logic. 
write: -/// ```ignore -/// #[pallet::genesis_config] -/// struct GenesisConfig { -/// // fields of add_extra_genesis -/// } -/// impl Default for GenesisConfig { -/// // type default or default provided for fields -/// } -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { -/// // for instantiable pallet: -/// // `impl GenesisBuild for GenesisConfig { -/// fn build() { -/// // The add_extra_genesis build logic -/// } -/// } -/// ``` -/// for each storage, if it contains `config(..)` then add fields, and make it default to -/// the value in `= ..;` or the type default if none, if it contains no build then also add -/// the logic to build the value. for each storage if it contains `build(..)` then add the -/// logic to `genesis_build`. -/// -/// NOTE: within `decl_storage`: the individual config is executed first, followed by the -/// build and finally the `add_extra_genesis` build. -/// -/// Once this is done you can migrate storages individually, a few notes: -/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing, -/// - for storages with `get(fn ..)` use [`#[pallet::getter(fn -/// ...)]`](#palletgetterfn-my_getter_fn_name-optional) -/// - for storages with value being `Option<$something>` make generic `Value` being -/// `$something` and generic `QueryKind` being `OptionQuery` (note: this is default). -/// Otherwise make `Value` the complete value type and `QueryKind` being `ValueQuery`. -/// - for storages with default value: `= $expr;` provide some specific `OnEmpty` generic. -/// To do so use of `#[pallet::type_value]` to generate the wanted struct to put. -/// example: `MyStorage: u32 = 3u32` would be written: -/// -/// ```ignore -/// #[pallet::type_value] fn MyStorageOnEmpty() -> u32 { 3u32 } -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageValue<_, u32, ValueQuery, MyStorageOnEmpty>; -/// ``` -/// -/// NOTE: `decl_storage` also generates the functions `assimilate_storage` and -/// `build_storage` directly on `GenesisConfig`, and these are sometimes used in tests. -/// In order not to break they can be implemented manually, one can implement those -/// functions by calling the `GenesisBuild` implementation. -/// 10. **migrate origin**: move the origin to the pallet module to be under a -/// [`#[pallet::origin]`](#origin-palletorigin-optional) attribute -/// 11. **migrate validate_unsigned**: move the -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) implementation to the pallet -/// module under a -/// [`#[pallet::validate_unsigned]`](#validate-unsigned-palletvalidate_unsigned-optional) -/// attribute -/// 12. **migrate provide_inherent**: move the -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) implementation to the pallet -/// module under a [`#[pallet::inherent]`](#inherent-palletinherent-optional) attribute -/// 13. rename the usage of `Module` to `Pallet` inside the crate. -/// 14. migration is done, now double check the migration with the checking migration -/// guidelines shown below. -/// -/// # Checking upgrade guidelines: -/// -/// * compare metadata. Use [subsee](https://github.com/ascjones/subsee) to fetch the metadata -/// and do a diff of the resulting json before and after migration. 
This checks for: -/// * call, names, signature, docs -/// * event names, docs -/// * error names, docs -/// * storage names, hasher, prefixes, default value -/// * error, error, constant -/// * manually check that: -/// * `Origin` was moved inside the macro under -/// [`#[pallet::origin]`](#origin-palletorigin-optional) if it exists -/// * [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) was moved inside the macro -/// under -/// [`#[pallet::validate_unsigned)]`](#validate-unsigned-palletvalidate_unsigned-optional) -/// if it exists -/// * [`ProvideInherent`](`pallet_prelude::ProvideInherent`) was moved inside the macro -/// under [`#[pallet::inherent)]`](#inherent-palletinherent-optional) if it exists -/// * `on_initialize` / `on_finalize` / `on_runtime_upgrade` / `offchain_worker` were moved -/// to the `Hooks` implementation -/// * storages with `config(..)` were converted to `GenesisConfig` field, and their default -/// is `= $expr;` if the storage has a default value -/// * storages with `build($expr)` or `config(..)` were built in `GenesisBuild::build` -/// * `add_extra_genesis` fields were converted to `GenesisConfig` field with their correct -/// default if specified -/// * `add_extra_genesis` build was written into `GenesisBuild::build` -/// * storage items defined with [`pallet`] use the name of the pallet provided by -/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used -/// the `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). Thus -/// a runtime using the pallet must be careful with this change. To handle this change: -/// * either ensure that the name of the pallet given to `construct_runtime!` is the same -/// as the name the pallet was giving to `decl_storage`, -/// * or do a storage migration from the old prefix used to the new prefix used. -/// -/// NOTE: The prefixes used by storage items are in metadata. Thus, ensuring the metadata -/// hasn't changed ensures that the `pallet_prefix`s used by the storage items haven't changed. -/// -/// # Notes when macro fails to show proper error message spans: -/// -/// Rustc loses span for some macro input. Some tips to fix it: -/// * do not use inner attribute: -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// //! This inner attribute will make span fail -/// .. -/// } -/// ``` -/// * use the newest nightly possible. +///
+/// WARNING:
+/// Dev mode pallets must never be deployed or used in production, as doing so can break
+/// your chain. Once you are done tinkering, remove the `dev_mode` argument from your
+/// `#[pallet]` declaration and fix any resulting compile errors before using your pallet
+/// in a production scenario.
+/// 
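+///
+/// For illustration, a minimal sketch of a dev mode pallet (a hypothetical example, all
+/// names are illustrative and not taken from a real runtime):
+///
+/// ```ignore
+/// #[frame_support::pallet(dev_mode)]
+/// pub mod pallet {
+///     use frame_support::pallet_prelude::*;
+///     use frame_system::pallet_prelude::*;
+///
+///     #[pallet::config]
+///     pub trait Config: frame_system::Config {}
+///
+///     #[pallet::pallet]
+///     pub struct Pallet<T>(_);
+///
+///     // In dev mode the hasher may be left as `_` (it defaults to `Blake2_128Concat`) and
+///     // the value type does not need `MaxEncodedLen`, since all storages are unbounded.
+///     #[pallet::storage]
+///     pub type MyMap<T> = StorageMap<_, _, u32, sp_std::vec::Vec<u8>>;
+///
+///     #[pallet::call]
+///     impl<T: Config> Pallet<T> {
+///         // No `#[pallet::weight]` or `#[pallet::call_index]` is needed in dev mode.
+///         pub fn do_something(_origin: OriginFor<T>) -> DispatchResult {
+///             Ok(())
+///         }
+///     }
+/// }
+/// ```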
pub use frame_support_procedural::pallet; -/// Contains macro stubs for all of the pallet:: macros -pub mod pallet_macros { - pub use frame_support_procedural::{ - composite_enum, config, disable_frame_system_supertrait_check, error, event, - extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, - import_section, inherent, no_default, no_default_bounds, origin, pallet_section, - storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, - whitelist_storage, - }; +/// Contains macro stubs for all of the `pallet::` macros +pub mod pallet_macros { + /// Declare the storage as whitelisted from benchmarking. + /// + /// Doing so will exclude reads of that value's storage key from counting towards weight + /// calculations during benchmarking. + /// + /// This attribute should only be attached to storages that are known to be + /// read/used in every block. This will result in a more accurate benchmarking weight. + /// + /// ### Example + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::storage] + /// #[pallet::whitelist_storage] + /// pub type MyStorage<T> = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::whitelist_storage; + + /// Allows specifying the weight of a call. + /// + /// Each dispatchable needs to define a weight with the `#[pallet::weight($expr)]` + /// attribute. The first argument must be `origin: OriginFor<T>`. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::call] + /// impl<T: Config> Pallet<T> { + /// #[pallet::weight({0})] // <- set actual weight here + /// #[pallet::call_index(0)] + /// pub fn something( + /// _: OriginFor<T>, + /// foo: u32, + /// ) -> DispatchResult { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::weight; + + /// Allows whitelisting a storage item from decoding during try-runtime checks. + /// + /// The optional attribute `#[pallet::disable_try_decode_storage]` will declare the + /// storage as whitelisted from decoding during try-runtime checks. This should only be + /// attached to transient storage which cannot be migrated during runtime upgrades. + /// + /// ### Example + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::storage] + /// #[pallet::disable_try_decode_storage] + /// pub type MyStorage<T> = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::disable_try_decode_storage; + + /// Declares a storage as unbounded in potential size. + /// + /// When implementing the storage info (when `#[pallet::generate_storage_info]` is + /// specified on the pallet struct placeholder), the size of the storage will be declared + /// as unbounded. This can be useful for storage which can never go into PoV (Proof of + /// Validity).
+ /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::storage] + /// #[pallet::unbounded] + /// pub type MyStorage<T> = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::unbounded; + + /// Defines what storage prefix to use for a storage item when building the trie. + /// + /// This is helpful if you wish to rename the storage field but don't want to perform a + /// migration. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::storage] + /// #[pallet::storage_prefix = "foo"] + /// pub type MyStorage<T> = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::storage_prefix; + + /// Ensures the generated `DefaultConfig` will not have any bounds for + /// that trait item. + /// + /// Attaching this attribute to a trait item ensures that the generated trait + /// `DefaultConfig` will not have any bounds for this trait item. + /// + /// As an example, if you have a trait item `type AccountId: SomeTrait;` in your `Config` + /// trait, the generated `DefaultConfig` will only have `type AccountId;` with no trait + /// bound. + pub use frame_support_procedural::no_default_bounds; + + /// Ensures the trait item will not be used as a default with the + /// `#[derive_impl(..)]` attribute macro. + /// + /// The optional attribute `#[pallet::no_default]` can be attached to trait items within a + /// `Config` trait impl that has [`#[pallet::config(with_default)]`](`config`) + /// attached. + pub use frame_support_procedural::no_default; + + /// Declares a module as importable into a pallet via + /// [`#[import_section]`](`import_section`). + /// + /// Note that sections are imported by their module name/ident, and should be referred to + /// by their _full path_ from the perspective of the target pallet. Do not attempt to make + /// use of `use` statements to bring pallet sections into scope, as this will not work + /// (unless you do so as part of a wildcard import, in which case it will work). + /// + /// ## Naming Logistics + /// + /// Also note that because of how `#[pallet_section]` works, pallet section names must be + /// globally unique _within the crate in which they are defined_. For more information on + /// why this must be the case, see macro_magic's + /// [`#[export_tokens]`](https://docs.rs/macro_magic/latest/macro_magic/attr.export_tokens.html) macro. + /// + /// Optionally, you may provide an argument to `#[pallet_section]` such as + /// `#[pallet_section(some_ident)]`, in the event that there is another pallet section in + /// the same crate with the same ident/name. The ident you specify can then be used instead + /// of the module's ident name when you go to import it via + /// [`#[import_section]`](`import_section`). + pub use frame_support_procedural::pallet_section; + + /// The `#[pallet::inherent]` attribute allows the pallet to provide + /// [inherents](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions).
+ /// + /// An inherent is some piece of data that is inserted by a block authoring node at block + /// creation time and can either be accepted or rejected by validators based on whether the + /// data falls within an acceptable range. + /// + /// The most common inherent is the `timestamp` that is inserted into every block. Since + /// there is no way to validate timestamps, validators simply check that the timestamp + /// reported by the block authoring node falls within an acceptable range. + /// + /// Example usage: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_support::inherent::IsFatalError; + /// # use sp_timestamp::InherentError; + /// # use sp_std::result; + /// # + /// // Example inherent identifier + /// pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0"; + /// + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::inherent] + /// impl<T: Config> ProvideInherent for Pallet<T> { + /// type Call = Call<T>; + /// type Error = InherentError; + /// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + /// + /// fn create_inherent(data: &InherentData) -> Option<Self::Call> { + /// unimplemented!() + /// } + /// + /// fn check_inherent( + /// call: &Self::Call, + /// data: &InherentData, + /// ) -> result::Result<(), Self::Error> { + /// unimplemented!() + /// } + /// + /// fn is_inherent(call: &Self::Call) -> bool { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type + /// `Pallet<T>`, and some optional where clause. + /// + /// ## Macro expansion + /// + /// The macro currently makes no use of this information, but it might use this information + /// in the future to give information directly to `construct_runtime`. + pub use frame_support_procedural::inherent; + + /// Splits a pallet declaration into multiple parts. + /// + /// An attribute macro that can be attached to a module declaration. Doing so will + /// import the contents of the specified external pallet section that is defined + /// elsewhere using [`#[pallet_section]`](`pallet_section`). + /// + /// ## Example + /// ``` + /// # use frame_support::pallet_macros::pallet_section; + /// # use frame_support::pallet_macros::import_section; + /// # + /// /// A [`pallet_section`] that defines the events for a pallet. + /// /// This can later be imported into the pallet using [`import_section`]. + /// #[pallet_section] + /// mod events { + /// #[pallet::event] + /// #[pallet::generate_deposit(pub(super) fn deposit_event)] + /// pub enum Event<T: Config> { + /// /// Event documentation should end with an array that provides descriptive names for event + /// /// parameters. [something, who] + /// SomethingStored { something: u32, who: T::AccountId }, + /// } + /// } + /// + /// #[import_section(events)] + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config { + /// # type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; + /// # } + /// } + /// ``` + /// + /// This will result in the contents of `events` being _verbatim_ imported into + /// the pallet above.
Note that since the tokens for `events` are essentially + /// copy-pasted into the target pallet, you cannot refer to imports that don't also + /// exist in the target pallet, but this is easily resolved by including all relevant + /// `use` statements within your pallet section, so they are imported as well, or by + /// otherwise ensuring that you have the same imports on the target pallet. + /// + /// It is perfectly permissible to import multiple pallet sections into the same pallet, + /// which can be done by having multiple `#[import_section(something)]` attributes + /// attached to the pallet. + /// + /// Note that sections are imported by their module name/ident, and should be referred to + /// by their _full path_ from the perspective of the target pallet. + pub use frame_support_procedural::import_section; + + /// Allows defining getter functions on `Pallet` storage. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::storage] + /// #[pallet::getter(fn my_getter_fn_name)] + /// pub type MyStorage<T> = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// See [`pallet::storage`](`frame_support::pallet_macros::storage`) for more info. + pub use frame_support_procedural::getter; + + /// Defines constants that are added to the constant field of + /// [`PalletMetadata`](frame_metadata::v15::PalletMetadata) struct for this pallet. + /// + /// Must be defined like: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # + /// #[pallet::extra_constants] + /// impl<T: Config> Pallet<T> // $optional_where_clause + /// { + /// #[pallet::constant_name(SomeU32ConstantName)] + /// /// Some doc + /// fn some_u32_constant() -> u32 { + /// 100u32 + /// } + /// } + /// } + /// ``` + /// + /// I.e. a regular rust `impl` block with some optional where clause and functions with 0 + /// args, 0 generics, and some return type. + pub use frame_support_procedural::extra_constants; + + #[rustfmt::skip] + /// Allows bypassing the `frame_system::Config` supertrait check. + /// + /// To bypass the syntactic `frame_system::Config` supertrait check, use the attribute + /// `pallet::disable_frame_system_supertrait_check`. + /// + /// Note this bypass is purely syntactic, and does not actually remove the requirement that your + /// pallet implements `frame_system::Config`. When using this attribute, your config is still required to implement + /// `frame_system::Config` either via + /// - Implementing a trait that itself implements `frame_system::Config` + /// - Tightly coupling it with another pallet which itself implements `frame_system::Config` + /// + /// e.g.
+ /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// trait OtherTrait: frame_system::Config {} + /// + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::config] + /// #[pallet::disable_frame_system_supertrait_check] + /// pub trait Config: OtherTrait {} + /// } + /// ``` + /// + /// To learn more about supertraits, see the + /// [trait_based_programming](../../polkadot_sdk_docs/reference_docs/trait_based_programming/index.html) + /// reference doc. + pub use frame_support_procedural::disable_frame_system_supertrait_check; + + /// The mandatory attribute allowing definition of configurable types for the pallet. + /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config // + $optionally_some_other_supertraits + /// // $optional_where_clause + /// { + /// // config items here + /// } + /// } + /// ``` + /// + /// I.e. a regular trait definition named `Config`, with the supertrait + /// [`frame_system::pallet::Config`](../../frame_system/pallet/trait.Config.html), and + /// optionally other supertraits and a where clause. (Specifying other supertraits here is + /// known as [tight coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) + /// + /// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds + /// `From<Event>` and `IsType<<Self as frame_system::Config>::RuntimeEvent>`. + /// + /// [`#[pallet::event]`](`event`) must be present if `RuntimeEvent` + /// exists as a config item in your `#[pallet::config]`. + /// + /// ## Optional: `with_default` + /// + /// An optional `with_default` argument may also be specified. Doing so will automatically + /// generate a `DefaultConfig` trait inside your pallet which is suitable for use with + /// [`#[derive_impl(..)]`](`frame_support::derive_impl`) to derive a default testing + /// config: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # use core::fmt::Debug; + /// # use frame_support::traits::Contains; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::config(with_default)] // <- with_default is optional + /// pub trait Config: frame_system::Config { + /// /// The overarching event type. + /// #[pallet::no_default_bounds] // Default is not supported for RuntimeEvent + /// type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; + /// + /// // ...other config items get default + /// } + /// + /// #[pallet::event] + /// pub enum Event<T> { + /// SomeEvent(u16, u32), + /// } + /// } + /// ``` + /// + /// As shown above, you may also attach the [`#[pallet::no_default]`](`no_default`) + /// attribute to specify that a particular trait item _cannot_ be used as a default when a + /// test `Config` is derived using the [`#[derive_impl(..)]`](`frame_support::derive_impl`) + /// attribute macro. This will cause that particular trait item to simply not appear in + /// default testing configs based on this config (the trait item will not be included in + /// `DefaultConfig`). + /// + /// ### `DefaultConfig` Caveats + /// + /// The auto-generated `DefaultConfig` trait: + /// - is always a _subset_ of your pallet's `Config` trait.
+ /// - can only contain items that don't rely on externalities, such as + /// `frame_system::Config`. + /// + /// Trait items that _do_ rely on externalities should be marked with + /// [`#[pallet::no_default]`](`no_default`). + /// + /// Consequently: + /// - Any items that rely on externalities _must_ be marked with + /// [`#[pallet::no_default]`](`no_default`) or your trait will fail to compile when used + /// with [`derive_impl`](`frame_support::derive_impl`). + /// - Items marked with [`#[pallet::no_default]`](`no_default`) are entirely excluded from + /// the `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to + /// implement such items. + /// + /// For more information, see [`frame_support::derive_impl`]. + pub use frame_support_procedural::config; + + /// Allows defining an enum that gets composed as an aggregate enum by `construct_runtime`. + /// + /// The `#[pallet::composite_enum]` attribute allows you to define an enum that gets + /// composed as an aggregate enum by `construct_runtime`. This is similar in principle to + /// [frame_support_procedural::event] and [frame_support_procedural::error]. + /// + /// The attribute currently only supports enum definitions, and identifiers that are named + /// `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. Arbitrary identifiers for the + /// enum are not supported. The aggregate enum generated by + /// [`frame_support::construct_runtime`] will have the name of `RuntimeFreezeReason`, + /// `RuntimeHoldReason`, `RuntimeLockId` and `RuntimeSlashReason` respectively. + /// + /// NOTE: The aggregate enum generated by `construct_runtime` generates a conversion + /// function from the pallet enum to the aggregate enum, and automatically derives the + /// following traits: + /// + /// ```ignore + /// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, + /// RuntimeDebug + /// ``` + /// + /// For ease of usage, when no `#[derive]` attributes are found for the enum under + /// [`#[pallet::composite_enum]`](composite_enum), the aforementioned traits are + /// automatically derived for it. The inverse is also true: if there are any `#[derive]` + /// attributes found for the enum, then no traits will automatically be derived for it. + /// + /// e.g. defining `HoldReason` in a pallet + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::composite_enum] + /// pub enum HoldReason { + /// /// The NIS Pallet has reserved it for a non-fungible receipt. + /// #[codec(index = 0)] + /// SomeHoldReason, + /// #[codec(index = 1)] + /// SomeOtherHoldReason, + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::composite_enum; + + /// Allows the pallet to validate unsigned transactions.
+ /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::validate_unsigned] + /// impl<T: Config> sp_runtime::traits::ValidateUnsigned for Pallet<T> { + /// type Call = Call<T>; + /// + /// fn validate_unsigned(_source: TransactionSource, _call: &Self::Call) -> TransactionValidity { + /// // Your implementation details here + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// I.e. a trait implementation with bound `T: Config`, of trait + /// [`ValidateUnsigned`](frame_support::pallet_prelude::ValidateUnsigned) for + /// type `Pallet<T>`, and some optional where clause. + /// + /// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used + /// to add some specific logic for transaction validation. + /// + /// ## Macro expansion + /// + /// The macro currently makes no use of this information, but it might use this information + /// in the future to give information directly to [`frame_support::construct_runtime`]. + pub use frame_support_procedural::validate_unsigned; + + /// Allows defining a struct implementing the [`Get`](frame_support::traits::Get) trait to + /// ease the use of storage types. + /// + /// This attribute is meant to be used alongside [`#[pallet::storage]`](`storage`) to + /// define a storage's default value. This attribute can be used multiple times. + /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use sp_runtime::FixedU128; + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::storage] + /// pub(super) type SomeStorage<T: Config> = + /// StorageValue<_, FixedU128, ValueQuery, DefaultForSomeValue>; + /// + /// // Define the default value for SomeStorage + /// #[pallet::type_value] + /// pub fn DefaultForSomeValue() -> FixedU128 { + /// FixedU128::from_u32(1) + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// ## Macro expansion + /// + /// The macro renames the function to some internal name, generates a struct with the + /// original name of the function and its generic, and implements `Get<$ReturnType>` by + /// calling the user defined function. + pub use frame_support_procedural::type_value; + + /// Allows defining a storage version for the pallet. + /// + /// Because the `pallet::pallet` macro implements + /// [`GetStorageVersion`](frame_support::traits::GetStorageVersion), the current storage + /// version needs to be communicated to the macro. This can be done by using the + /// `pallet::storage_version` attribute: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::StorageVersion; + /// # use frame_support::traits::GetStorageVersion; + /// # + /// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); + /// + /// #[pallet::pallet] + /// #[pallet::storage_version(STORAGE_VERSION)] + /// pub struct Pallet<T>(_); + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// If not present, the current storage version is set to the default value.
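+ ///
+ /// As a usage sketch (hypothetical migration code, not part of this crate), the version
+ /// declared in code is typically compared against the version found on-chain before
+ /// running a migration:
+ ///
+ /// ```ignore
+ /// fn on_runtime_upgrade() -> Weight {
+ ///     // `on_chain_storage_version` reads the version last written to state, while
+ ///     // `STORAGE_VERSION` is the one declared via `#[pallet::storage_version(..)]`.
+ ///     if Pallet::<T>::on_chain_storage_version() < STORAGE_VERSION {
+ ///         // ... migrate storage here ...
+ ///         STORAGE_VERSION.put::<Pallet<T>>();
+ ///     }
+ ///     Weight::zero()
+ /// }
+ /// ```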
+ pub use frame_support_procedural::storage_version; + + /// The `#[pallet::hooks]` attribute allows you to specify a + /// [`frame_support::traits::Hooks`] implementation for `Pallet<T>` that specifies + /// pallet-specific logic. + /// + /// The item the attribute attaches to must be defined as follows: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::hooks] + /// impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { + /// // Implement hooks here + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait + /// `Hooks<BlockNumberFor<T>>` (they are defined in preludes), for the type `Pallet<T>`. + /// + /// Optionally, you could add a where clause. + /// + /// ## Macro expansion + /// + /// The macro implements the traits + /// [`OnInitialize`](frame_support::traits::OnInitialize), + /// [`OnIdle`](frame_support::traits::OnIdle), + /// [`OnFinalize`](frame_support::traits::OnFinalize), + /// [`OnRuntimeUpgrade`](frame_support::traits::OnRuntimeUpgrade), + /// [`OffchainWorker`](frame_support::traits::OffchainWorker), and + /// [`IntegrityTest`](frame_support::traits::IntegrityTest) using + /// the provided [`Hooks`](frame_support::traits::Hooks) implementation. + /// + /// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some + /// additional logic. E.g. logic to write the pallet version into storage. + /// + /// NOTE: The macro also adds some tracing logic when implementing the above traits. The + /// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. + pub use frame_support_procedural::hooks; + + /// Generates a helper function on `Pallet` that handles deposit events. + /// + /// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. + /// + /// ## Macro expansion + /// + /// The macro will add on enum `Event` the attributes: + /// * `#[derive(`[`frame_support::CloneNoBound`]`)]` + /// * `#[derive(`[`frame_support::EqNoBound`]`)]` + /// * `#[derive(`[`frame_support::PartialEqNoBound`]`)]` + /// * `#[derive(`[`frame_support::RuntimeDebugNoBound`]`)]` + /// * `#[derive(`[`codec::Encode`]`)]` + /// * `#[derive(`[`codec::Decode`]`)]` + /// + /// The macro implements `From<Event<..>>` for `()`. + /// + /// The macro implements a metadata function on `Event` returning the `EventMetadata`. + /// + /// If `#[pallet::generate_deposit]` is present then the macro implements `fn + /// deposit_event` on `Pallet`. + pub use frame_support_procedural::generate_deposit; + + /// Allows defining logic to make an extrinsic call feeless. + /// + /// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` + /// attribute, which explicitly defines the condition for the dispatchable to be feeless. + /// + /// The arguments for the closure must be the referenced arguments of the dispatchable + /// function. + /// + /// The closure must return `bool`. + /// + /// ### Example + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::call] + /// impl<T: Config> Pallet<T> { + /// #[pallet::call_index(0)] + /// /// Marks this call as feeless if `foo` is zero.
+ /// #[pallet::feeless_if(|_origin: &OriginFor<T>, foo: &u32| -> bool { + /// *foo == 0 + /// })] + /// pub fn something( + /// _: OriginFor<T>, + /// foo: u32, + /// ) -> DispatchResult { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// Please note that this only works for signed dispatchables and requires a signed + /// extension such as [`pallet_skip_feeless_payment::SkipCheckIfFeeless`] to wrap the + /// existing payment extension. Otherwise, this is completely ignored and the dispatchable + /// is still charged. + /// + /// ### Macro expansion + /// + /// The macro implements the [`pallet_skip_feeless_payment::CheckIfFeeless`] trait on the + /// dispatchable and calls the corresponding closure in the implementation. + /// + /// [`pallet_skip_feeless_payment::SkipCheckIfFeeless`]: ../../pallet_skip_feeless_payment/struct.SkipCheckIfFeeless.html + /// [`pallet_skip_feeless_payment::CheckIfFeeless`]: ../../pallet_skip_feeless_payment/struct.SkipCheckIfFeeless.html + pub use frame_support_procedural::feeless_if; + + /// Allows defining an error enum that will be returned from the dispatchable when an error + /// occurs. + /// + /// The information for this error type is then stored in runtime metadata. + /// + /// Item must be defined as follows: + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::error] + /// pub enum Error<T> { + /// /// SomeFieldLessVariant doc + /// SomeFieldLessVariant, + /// /// SomeVariantWithOneField doc + /// SomeVariantWithOneField(u32), + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field + /// variants. + /// + /// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to + /// be properly used in the metadata, and its encoded size should be as small as possible, + /// preferably 1 byte in size in order to reduce storage size. The error enum itself has an + /// absolute maximum encoded size specified by + /// [`frame_support::MAX_MODULE_ERROR_ENCODED_SIZE`]. + /// + /// (1 byte can still be 256 different errors. The more specific the error, the easier it + /// is to diagnose problems and give a better experience to the user. Don't skimp on having + /// lots of individual error conditions.) + /// + /// Field types in enum variants must also implement [`frame_support::PalletError`], + /// otherwise the pallet will fail to compile. Rust primitive types have already + /// implemented the [`frame_support::PalletError`] trait along with some commonly used + /// stdlib types such as [`Option`] and [`sp_std::marker::PhantomData`], and hence + /// in most use cases, a manual implementation is not necessary and is discouraged. + /// + /// The generic `T` must not bound anything and a `where` clause is not allowed. That said, + /// bounds and/or a where clause should not be needed for any use-case. + /// + /// ## Macro expansion + /// + /// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, + /// and `as_str` using variant doc. + /// + /// The macro also implements `From<Error<T>>` for `&'static str` and `From<Error<T>>` for + /// `DispatchError`. + pub use frame_support_procedural::error; + + /// Allows defining pallet events.
+ /// + /// Pallet events are stored under the `system` / `events` key when the block is applied + /// (and then replaced when the next block writes its events). + /// + /// The Event enum can be defined as follows: + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// # use frame_support::pallet_prelude::IsType; + /// # + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::event] + /// #[pallet::generate_deposit(fn deposit_event)] // Optional + /// pub enum Event<T> { + /// /// SomeEvent doc + /// SomeEvent(u16, u32), // SomeEvent with two fields + /// } + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config { + /// /// The overarching runtime event type. + /// type RuntimeEvent: From<Event<Self>> + /// + IsType<<Self as frame_system::Config>::RuntimeEvent>; + /// } + /// } + /// ``` + /// + /// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none + /// or `T` or `T: Config`, and optional where clause. + /// + /// `RuntimeEvent` must be defined in the `Config`, as shown in the example. + /// + /// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`codec::Encode`], + /// [`codec::Decode`], and [`Debug`] (on std only). For ease of use, bound by the trait + /// `Member`, available in [`frame_support::pallet_prelude`]. + pub use frame_support_procedural::event; - /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. In - /// slightly simplified terms, this macro declares the set of "transactions" of a pallet. + /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. + /// + /// In slightly simplified terms, this macro declares the set of "transactions" of a + /// pallet. /// /// > The exact definition of **extrinsic** can be found in /// > [`sp_runtime::generic::UncheckedExtrinsic`]. @@ -2382,14 +1985,24 @@ pub mod pallet_macros { /// If no `#[pallet::call]` exists, then a default implementation corresponding to the /// following code is automatically generated: /// - /// ```ignore - /// #[pallet::call] - /// impl Pallet {} + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// #[pallet::pallet] + /// pub struct Pallet<T>(_); + /// + /// #[pallet::call] // <- automatically generated + /// impl<T: Config> Pallet<T> {} // <- automatically generated + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config {} + /// } /// ``` pub use frame_support_procedural::call; - /// Enforce the index of a variant in the generated `enum Call`. See [`call`] for more - /// information. + /// Enforce the index of a variant in the generated `enum Call`. + /// + /// See [`call`] for more information. /// /// All call indexes start from 0, until it encounters a dispatchable function with a /// defined call index. The dispatchable function that lexically follows the function with @@ -2399,7 +2012,9 @@ pub mod pallet_macros { pub use frame_support_procedural::call_index; /// Declares the arguments of a [`call`] function to be encoded using - /// [`codec::Compact`]. This will results in smaller extrinsic encoding. + /// [`codec::Compact`]. + /// + /// This will result in smaller extrinsic encoding. /// /// A common example of `compact` is for numeric values that are often times far far away /// from their theoretical maximum. 
For example, in the context of a crypto-currency, the @@ -2495,8 +2110,8 @@ pub mod pallet_macros { /// ``` pub use frame_support_procedural::genesis_build; - /// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded - /// by [`Get`](frame_support::pallet_prelude::Get) from [`pallet::config`](`macro@config`) + /// Allows adding an associated type trait bounded by + /// [`Get`](frame_support::pallet_prelude::Get) from [`pallet::config`](`macro@config`) /// into metadata. /// /// ## Example @@ -2517,9 +2132,10 @@ pub mod pallet_macros { /// ``` pub use frame_support_procedural::constant; - /// Declares a type alias as a storage item. Storage items are pointers to data stored - /// on-chain (the *blockchain state*), under a specific key. The exact key is dependent on - /// the type of the storage. + /// Declares a type alias as a storage item. + /// + /// Storage items are pointers to data stored on-chain (the *blockchain state*), under a + /// specific key. The exact key is dependent on the type of the storage. /// /// > From the perspective of this pallet, the entire blockchain state is abstracted behind /// > a key-value api, namely [`sp_io::storage`]. @@ -2699,6 +2315,8 @@ pub mod pallet_macros { /// * [`macro@getter`]: Creates a custom getter function. /// * [`macro@storage_prefix`]: Overrides the default prefix of the storage item. /// * [`macro@unbounded`]: Declares the storage item as unbounded. + /// * [`macro@disable_try_decode_storage`]: Declares that try-runtime checks should not + /// attempt to decode the storage item. /// /// #### Example /// ``` @@ -2714,10 +2332,14 @@ pub mod pallet_macros { /// #[pallet::getter(fn foo)] /// #[pallet::storage_prefix = "OtherFoo"] /// #[pallet::unbounded] + /// #[pallet::disable_try_decode_storage] /// pub type Foo = StorageValue<_, u32, ValueQuery>; /// } /// ``` pub use frame_support_procedural::storage; + + /// Allows defining conditions for a task to run. + /// /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a /// given work item to be valid. @@ -2726,6 +2348,9 @@ pub mod pallet_macros { /// should have the same signature as the function it is attached to, except that it should /// return a `bool` instead. pub use frame_support_procedural::task_condition; + + /// Allows defining an index for a task. + /// /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given /// work item. @@ -2733,22 +2358,29 @@ pub mod pallet_macros { /// It takes an integer literal as input, which is then used to define the index. This /// index should be unique for each function in the `impl` block. pub use frame_support_procedural::task_index; + + /// Allows defining an iterator over available work items for a task. + /// /// This attribute is attached to a function inside an `impl` block annoated with - /// [`pallet::tasks_experimental`](`tasks_experimental`) to define an iterator over the - /// available work items for a task. + /// [`pallet::tasks_experimental`](`tasks_experimental`). /// /// It takes an iterator as input that yields a tuple with same types as the function /// arguments. pub use frame_support_procedural::task_list; + + /// Allows defining the weight of a task. 
+ /// /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) define the weight of a given work /// item. /// /// It takes a closure as input, which should return a `Weight` value. pub use frame_support_procedural::task_weight; + /// Allows you to define some service work that can be recognized by a script or an - /// off-chain worker. Such a script can then create and submit all such work items at any - /// given time. + /// off-chain worker. + /// + /// Such a script can then create and submit all such work items at any given time. /// /// These work items are defined as instances of the [`Task`](frame_support::traits::Task) /// trait. [`pallet:tasks_experimental`](`tasks_experimental`) when attached to an `impl` @@ -2773,6 +2405,64 @@ pub mod pallet_macros { /// Now, this can be executed as follows: #[doc = docify::embed!("src/tests/tasks.rs", tasks_work)] pub use frame_support_procedural::tasks_experimental; + + /// Allows a pallet to declare a type as an origin. + /// + /// If defined as such, this type will be amalgamated at the runtime level into + /// `RuntimeOrigin`, very similar to [`call`], [`error`] and [`event`]. See + /// [`composite_enum`] for similar cases. + /// + /// Origin is a complex FRAME topic and is further explained in `polkadot_sdk_docs`. + /// + /// ## Syntax Variants + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// /// On the spot declaration. + /// #[pallet::origin] + /// #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + /// pub enum Origin { + /// Foo, + /// Bar, + /// } + /// } + /// ``` + /// + /// Or, more commonly used: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + /// pub enum RawOrigin { + /// Foo, + /// Bar, + /// } + /// + /// #[pallet::origin] + /// pub type Origin = RawOrigin; + /// } + /// ``` + /// + /// ## Warning + /// + /// Modifying any pallet's origin type will cause the runtime level origin type to also + /// change in encoding. If stored anywhere on-chain, this will require a data migration. + /// + /// Read more about origins at the [Origin Reference + /// Docs](../../polkadot_sdk_docs/reference_docs/frame_origin/index.html). + pub use frame_support_procedural::origin; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index d059a992a8664cd8c3583f7f6fd1bff6af102770..2ceab44cb16bd4bcf356caa07a3506aa76ba79db 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -16,13 +16,21 @@ // limitations under the License. 
use crate::{ - traits::{GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, StorageVersion}, - weights::{RuntimeDbWeight, Weight}, + defensive, + storage::transactional::with_transaction_opaque_err, + traits::{ + Defensive, GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, SafeMode, + StorageVersion, + }, + weights::{RuntimeDbWeight, Weight, WeightMeter}, }; +use codec::{Decode, Encode, MaxEncodedLen}; use impl_trait_for_tuples::impl_for_tuples; +use sp_arithmetic::traits::Bounded; use sp_core::Get; use sp_io::{hashing::twox_128, storage::clear_prefix, KillStorageResult}; -use sp_std::marker::PhantomData; +use sp_runtime::traits::Zero; +use sp_std::{marker::PhantomData, vec::Vec}; /// Handles storage migration pallet versioning. /// @@ -43,12 +51,30 @@ use sp_std::marker::PhantomData; /// Otherwise, a warning is logged notifying the developer that the upgrade was a noop and should /// probably be removed. /// +/// It is STRONGLY RECOMMENDED to write the unversioned migration logic in a private module and +/// only export the versioned migration logic to prevent accidentally using the unversioned +/// migration in any runtimes. +/// /// ### Examples /// ```ignore /// // In file defining migrations -/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); -/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { -/// // OnRuntimeUpgrade implementation... +/// +/// /// Private module containing *version unchecked* migration logic. +/// /// +/// /// Should only be used by the [`VersionedMigration`] type in this module to create something to +/// /// export. +/// /// +/// /// We keep this private so the unversioned migration cannot accidentally be used in any runtimes. +/// /// +/// /// For more about this pattern of keeping items private, see +/// /// - https://github.com/rust-lang/rust/issues/30905 +/// /// - https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 +/// mod version_unchecked { +/// use super::*; +/// pub struct MigrateV5ToV6<T>(sp_std::marker::PhantomData<T>); +/// impl<T: Config> OnRuntimeUpgrade for MigrateV5ToV6<T> { +/// // OnRuntimeUpgrade implementation... +/// } /// } /// /// pub type MigrateV5ToV6 = @@ -73,7 +99,7 @@ pub struct VersionedMigration), @@ -91,7 +117,7 @@ impl< const FROM: u16, const TO: u16, Inner: crate::traits::OnRuntimeUpgrade, - Pallet: GetStorageVersion + PalletInfoAccess, + Pallet: GetStorageVersion + PalletInfoAccess, DbWeight: Get, > crate::traits::OnRuntimeUpgrade for VersionedMigration { @@ -100,7 +126,6 @@ impl< /// migration ran or not. #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - use codec::Encode; let on_chain_version = Pallet::on_chain_storage_version(); if on_chain_version == FROM { Ok(VersionedPostUpgradeData::MigrationExecuted(Inner::pre_upgrade()?).encode()) @@ -163,25 +188,25 @@ impl< } } -/// Can store the current pallet version in storage. -pub trait StoreCurrentStorageVersion { - /// Write the current storage version to the storage. - fn store_current_storage_version(); +/// Can store the in-code pallet version on-chain. +pub trait StoreInCodeStorageVersion<T> { + /// Write the in-code storage version on-chain. 
+	fn store_in_code_storage_version();
 }

-impl<T: GetStorageVersion<CurrentStorageVersion = StorageVersion> + PalletInfoAccess>
-	StoreCurrentStorageVersion<T> for StorageVersion
+impl<T: GetStorageVersion<InCodeStorageVersion = StorageVersion> + PalletInfoAccess>
+	StoreInCodeStorageVersion<T> for StorageVersion
 {
-	fn store_current_storage_version() {
-		let version = <T as GetStorageVersion>::current_storage_version();
+	fn store_in_code_storage_version() {
+		let version = <T as GetStorageVersion>::in_code_storage_version();
 		version.put::<T>();
 	}
 }

-impl<T: GetStorageVersion<CurrentStorageVersion = NoStorageVersionSet> + PalletInfoAccess>
-	StoreCurrentStorageVersion<T> for NoStorageVersionSet
+impl<T: GetStorageVersion<InCodeStorageVersion = NoStorageVersionSet> + PalletInfoAccess>
+	StoreInCodeStorageVersion<T> for NoStorageVersionSet
 {
-	fn store_current_storage_version() {
+	fn store_in_code_storage_version() {
 		StorageVersion::default().put::<T>();
 	}
 }
@@ -193,7 +218,7 @@ pub trait PalletVersionToStorageVersionHelper {
 impl<T: GetStorageVersion + PalletInfoAccess> PalletVersionToStorageVersionHelper for T
 where
-	T::CurrentStorageVersion: StoreCurrentStorageVersion<T>,
+	T::InCodeStorageVersion: StoreInCodeStorageVersion<T>,
 {
 	fn migrate(db_weight: &RuntimeDbWeight) -> Weight {
 		const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:";
@@ -204,8 +229,7 @@ where
 		sp_io::storage::clear(&pallet_version_key(<T as PalletInfoAccess>::name()));

-		<T::CurrentStorageVersion as StoreCurrentStorageVersion<T>>::store_current_storage_version(
-		);
+		<T::InCodeStorageVersion as StoreInCodeStorageVersion<T>>::store_in_code_storage_version();

 		db_weight.writes(2)
 	}
@@ -226,7 +250,7 @@ impl PalletVersionToStorageVersionHelper for T {
 /// Migrate from the `PalletVersion` struct to the new [`StorageVersion`] struct.
 ///
-/// This will remove all `PalletVersion's` from the state and insert the current storage version.
+/// This will remove all `PalletVersion's` from the state and insert the in-code storage version.
 pub fn migrate_from_pallet_version_to_storage_version<
 	Pallets: PalletVersionToStorageVersionHelper,
 >(
@@ -344,3 +368,622 @@ impl<P: Get<&'static str>, DbWeight: Get<RuntimeDbWeight>> frame_support::traits
 	Ok(())
 }
 }
+
+/// A migration that can proceed in multiple steps.
+pub trait SteppedMigration {
+	/// The cursor type that stores the progress (aka. state) of this migration.
+	type Cursor: codec::FullCodec + codec::MaxEncodedLen;
+
+	/// The unique identifier type of this migration.
+	type Identifier: codec::FullCodec + codec::MaxEncodedLen;
+
+	/// The unique identifier of this migration.
+	///
+	/// If two migrations have the same identifier, then they are assumed to be identical.
+	fn id() -> Self::Identifier;
+
+	/// The maximum number of steps that this migration can take.
+	///
+	/// This can be used to enforce progress and prevent migrations becoming stuck forever. A
+	/// migration that exceeds its max steps is treated as failed. `None` means that there is no
+	/// limit.
+	fn max_steps() -> Option<u32> {
+		None
+	}
+
+	/// Try to migrate as much as possible with the given weight.
+	///
+	/// **ANY STORAGE CHANGES MUST BE ROLLED-BACK BY THE CALLER UPON ERROR.** This is necessary
+	/// since the caller cannot return a cursor in the error case. [`Self::transactional_step`] is
+	/// provided as convenience for a caller. A cursor of `None` implies that the migration is at
+	/// its end. A migration that once returned `None` is guaranteed to never be called again.
+	fn step(
+		cursor: Option<Self::Cursor>,
+		meter: &mut WeightMeter,
+	) -> Result<Option<Self::Cursor>, SteppedMigrationError>;
+
+	/// Same as [`Self::step`], but rolls back pending changes in the error case.
+	fn transactional_step(
+		mut cursor: Option<Self::Cursor>,
+		meter: &mut WeightMeter,
+	) -> Result<Option<Self::Cursor>, SteppedMigrationError> {
+		with_transaction_opaque_err(move || match Self::step(cursor, meter) {
+			Ok(new_cursor) => {
+				cursor = new_cursor;
+				sp_runtime::TransactionOutcome::Commit(Ok(cursor))
+			},
+			Err(err) => sp_runtime::TransactionOutcome::Rollback(Err(err)),
+		})
+		.map_err(|()| SteppedMigrationError::Failed)?
+	}
+}
+
+/// Error that can occur during a [`SteppedMigration`].
+#[derive(Debug, Encode, Decode, MaxEncodedLen, scale_info::TypeInfo)]
+pub enum SteppedMigrationError {
+	// Transient errors:
+	/// The remaining weight is not enough to do anything.
+	///
+	/// Can be resolved by calling with at least `required` weight. Note that calling it with
+	/// exactly `required` weight could cause it to not make any progress.
+	InsufficientWeight {
+		/// Amount of weight required to make progress.
+		required: Weight,
+	},
+	// Permanent errors:
+	/// The migration cannot decode its cursor and therefore cannot proceed.
+	///
+	/// This should not happen unless (1) the migration itself returned an invalid cursor in a
+	/// previous iteration, (2) the storage got corrupted or (3) there is a bug in the caller's
+	/// code.
+	InvalidCursor,
+	/// The migration encountered a permanent error and cannot continue.
+	Failed,
+}
+
+/// Notification handler for status updates regarding Multi-Block-Migrations.
+#[impl_trait_for_tuples::impl_for_tuples(8)]
+pub trait MigrationStatusHandler {
+	/// Notifies of the start of a runtime migration.
+	fn started() {}
+
+	/// Notifies of the completion of a runtime migration.
+	fn completed() {}
+}
+
+/// Handles a failed runtime migration.
+///
+/// This should never happen, but is here for completeness.
+pub trait FailedMigrationHandler {
+	/// Infallibly handle a failed runtime migration.
+	///
+	/// Gets passed in the optional index of the migration in the batch that caused the failure.
+	/// Returning `None` means that no automatic handling should take place and the callee decides
+	/// in the implementation what to do.
+	fn failed(migration: Option<u32>) -> FailedMigrationHandling;
+}
+
+/// Do not allow any transactions to be processed after a runtime upgrade failed.
+///
+/// This is **not a sane default**, since it prevents governance intervention.
+pub struct FreezeChainOnFailedMigration;
+
+impl FailedMigrationHandler for FreezeChainOnFailedMigration {
+	fn failed(_migration: Option<u32>) -> FailedMigrationHandling {
+		FailedMigrationHandling::KeepStuck
+	}
+}
+
+/// Enter safe mode on a failed runtime upgrade.
+///
+/// This can be very useful to manually intervene and fix the chain state. `Else` is used in case
+/// that the safe mode could not be entered.
+pub struct EnterSafeModeOnFailedMigration<SM, Else>(PhantomData<(SM, Else)>);
+
+impl<SM: SafeMode, Else: FailedMigrationHandler> FailedMigrationHandler
+	for EnterSafeModeOnFailedMigration<SM, Else>
+where
+	<SM as SafeMode>::BlockNumber: Bounded,
+{
+	fn failed(migration: Option<u32>) -> FailedMigrationHandling {
+		let entered = if SM::is_entered() {
+			SM::extend(Bounded::max_value())
+		} else {
+			SM::enter(Bounded::max_value())
+		};
+
+		// If we could not enter or extend safe mode (for whatever reason), then we try the next.
+		if entered.is_err() {
+			Else::failed(migration)
+		} else {
+			FailedMigrationHandling::KeepStuck
+		}
+	}
+}
+
+/// How to proceed after a runtime upgrade failed.
+///
+/// There is NO SANE DEFAULT HERE. All options are very dangerous and should be used with care.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum FailedMigrationHandling { + /// Resume extrinsic processing of the chain. This will not resume the upgrade. + /// + /// This should be supplemented with additional measures to ensure that the broken chain state + /// does not get further messed up by user extrinsics. + ForceUnstuck, + /// Set the cursor to `Stuck` and keep blocking extrinsics. + KeepStuck, + /// Don't do anything with the cursor and let the handler decide. + /// + /// This can be useful in cases where the other two options would overwrite any changes that + /// were done by the handler to the cursor. + Ignore, +} + +/// Something that can do multi step migrations. +pub trait MultiStepMigrator { + /// Hint for whether [`Self::step`] should be called. + fn ongoing() -> bool; + + /// Do the next step in the MBM process. + /// + /// Must gracefully handle the case that it is currently not upgrading. + fn step() -> Weight; +} + +impl MultiStepMigrator for () { + fn ongoing() -> bool { + false + } + + fn step() -> Weight { + Weight::zero() + } +} + +/// Multiple [`SteppedMigration`]. +pub trait SteppedMigrations { + /// The number of migrations that `Self` aggregates. + fn len() -> u32; + + /// The `n`th [`SteppedMigration::id`]. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_id(n: u32) -> Option>; + + /// The [`SteppedMigration::max_steps`] of the `n`th migration. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_max_steps(n: u32) -> Option>; + + /// Do a [`SteppedMigration::step`] on the `n`th migration. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>>; + + /// Do a [`SteppedMigration::transactional_step`] on the `n`th migration. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>>; + + /// The maximal encoded length across all cursors. + fn cursor_max_encoded_len() -> usize; + + /// The maximal encoded length across all identifiers. + fn identifier_max_encoded_len() -> usize; + + /// Assert the integrity of the migrations. + /// + /// Should be executed as part of a test prior to runtime usage. May or may not need + /// externalities. + #[cfg(feature = "std")] + fn integrity_test() -> Result<(), &'static str> { + use crate::ensure; + let l = Self::len(); + + for n in 0..l { + ensure!(Self::nth_id(n).is_some(), "id is None"); + ensure!(Self::nth_max_steps(n).is_some(), "steps is None"); + + // The cursor that we use does not matter. Hence use empty. 
+ ensure!( + Self::nth_step(n, Some(vec![]), &mut WeightMeter::new()).is_some(), + "steps is None" + ); + ensure!( + Self::nth_transactional_step(n, Some(vec![]), &mut WeightMeter::new()).is_some(), + "steps is None" + ); + } + + Ok(()) + } +} + +impl SteppedMigrations for () { + fn len() -> u32 { + 0 + } + + fn nth_id(_n: u32) -> Option> { + None + } + + fn nth_max_steps(_n: u32) -> Option> { + None + } + + fn nth_step( + _n: u32, + _cursor: Option>, + _meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + None + } + + fn nth_transactional_step( + _n: u32, + _cursor: Option>, + _meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + None + } + + fn cursor_max_encoded_len() -> usize { + 0 + } + + fn identifier_max_encoded_len() -> usize { + 0 + } +} + +// A collection consisting of only a single migration. +impl SteppedMigrations for T { + fn len() -> u32 { + 1 + } + + fn nth_id(_n: u32) -> Option> { + Some(T::id().encode()) + } + + fn nth_max_steps(n: u32) -> Option> { + // It should be generally fine to call with n>0, but the code should not attempt to. + n.is_zero() + .then_some(T::max_steps()) + .defensive_proof("nth_max_steps should only be called with n==0") + } + + fn nth_step( + _n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + if !_n.is_zero() { + defensive!("nth_step should only be called with n==0"); + return None + } + + let cursor = match cursor { + Some(cursor) => match T::Cursor::decode(&mut &cursor[..]) { + Ok(cursor) => Some(cursor), + Err(_) => return Some(Err(SteppedMigrationError::InvalidCursor)), + }, + None => None, + }; + + Some(T::step(cursor, meter).map(|cursor| cursor.map(|cursor| cursor.encode()))) + } + + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + if n != 0 { + defensive!("nth_transactional_step should only be called with n==0"); + return None + } + + let cursor = match cursor { + Some(cursor) => match T::Cursor::decode(&mut &cursor[..]) { + Ok(cursor) => Some(cursor), + Err(_) => return Some(Err(SteppedMigrationError::InvalidCursor)), + }, + None => None, + }; + + Some( + T::transactional_step(cursor, meter).map(|cursor| cursor.map(|cursor| cursor.encode())), + ) + } + + fn cursor_max_encoded_len() -> usize { + T::Cursor::max_encoded_len() + } + + fn identifier_max_encoded_len() -> usize { + T::Identifier::max_encoded_len() + } +} + +#[impl_trait_for_tuples::impl_for_tuples(1, 30)] +impl SteppedMigrations for Tuple { + fn len() -> u32 { + for_tuples!( #( Tuple::len() )+* ) + } + + fn nth_id(n: u32) -> Option> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_id(n - i) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_step(n - i, cursor, meter) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + let mut i = 0; + + for_tuples! 
( #( + if (i + Tuple::len()) > n { + return Tuple::nth_transactional_step(n - i, cursor, meter) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_max_steps(n: u32) -> Option> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_max_steps(n - i) + } + + i += Tuple::len(); + )* ); + + None + } + + fn cursor_max_encoded_len() -> usize { + let mut max_len = 0; + + for_tuples!( #( + max_len = max_len.max(Tuple::cursor_max_encoded_len()); + )* ); + + max_len + } + + fn identifier_max_encoded_len() -> usize { + let mut max_len = 0; + + for_tuples!( #( + max_len = max_len.max(Tuple::identifier_max_encoded_len()); + )* ); + + max_len + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{assert_ok, storage::unhashed}; + + #[derive(Decode, Encode, MaxEncodedLen, Eq, PartialEq)] + pub enum Either { + Left(L), + Right(R), + } + + pub struct M0; + impl SteppedMigration for M0 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 0 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M0"); + unhashed::put(&[0], &()); + Ok(None) + } + } + + pub struct M1; + impl SteppedMigration for M1 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 1 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M1"); + unhashed::put(&[1], &()); + Ok(None) + } + + fn max_steps() -> Option { + Some(1) + } + } + + pub struct M2; + impl SteppedMigration for M2 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 2 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M2"); + unhashed::put(&[2], &()); + Ok(None) + } + + fn max_steps() -> Option { + Some(2) + } + } + + pub struct F0; + impl SteppedMigration for F0 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 3 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("F0"); + unhashed::put(&[3], &()); + Err(SteppedMigrationError::Failed) + } + } + + // Three migrations combined to execute in order: + type Triple = (M0, (M1, M2)); + // Six migrations, just concatenating the ones from before: + type Hextuple = (Triple, Triple); + + #[test] + fn singular_migrations_work() { + assert_eq!(M0::max_steps(), None); + assert_eq!(M1::max_steps(), Some(1)); + assert_eq!(M2::max_steps(), Some(2)); + + assert_eq!(<(M0, M1)>::nth_max_steps(0), Some(None)); + assert_eq!(<(M0, M1)>::nth_max_steps(1), Some(Some(1))); + assert_eq!(<(M0, M1, M2)>::nth_max_steps(2), Some(Some(2))); + + assert_eq!(<(M0, M1)>::nth_max_steps(2), None); + } + + #[test] + fn tuple_migrations_work() { + assert_eq!(<() as SteppedMigrations>::len(), 0); + assert_eq!(<((), ((), ())) as SteppedMigrations>::len(), 0); + assert_eq!(::len(), 3); + assert_eq!(::len(), 6); + + // Check the IDs. The index specific functions all return an Option, + // to account for the out-of-range case. 
+ assert_eq!(::nth_id(0), Some(0u8.encode())); + assert_eq!(::nth_id(1), Some(1u8.encode())); + assert_eq!(::nth_id(2), Some(2u8.encode())); + + sp_io::TestExternalities::default().execute_with(|| { + for n in 0..3 { + ::nth_step( + n, + Default::default(), + &mut WeightMeter::new(), + ); + } + }); + } + + #[test] + fn integrity_test_works() { + sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(<() as SteppedMigrations>::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + }); + } + + #[test] + fn transactional_rollback_works() { + sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(<(M0, F0) as SteppedMigrations>::nth_transactional_step( + 0, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap()); + assert!(unhashed::exists(&[0])); + + let _g = crate::StorageNoopGuard::new(); + assert!(<(M0, F0) as SteppedMigrations>::nth_transactional_step( + 1, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap() + .is_err()); + assert!(<(F0, M1) as SteppedMigrations>::nth_transactional_step( + 0, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap() + .is_err()); + }); + } +} diff --git a/substrate/frame/support/src/storage/transactional.rs b/substrate/frame/support/src/storage/transactional.rs index d42e1809e91292a8e0ad367af4e203b8b1b17f87..0671db4a3a86bc76f7706df6eefe28a8c34c0701 100644 --- a/substrate/frame/support/src/storage/transactional.rs +++ b/substrate/frame/support/src/storage/transactional.rs @@ -127,6 +127,22 @@ where } } +/// Same as [`with_transaction`] but casts any internal error to `()`. +/// +/// This rids `E` of the `From` bound that is required by `with_transaction`. +pub fn with_transaction_opaque_err(f: F) -> Result, ()> +where + F: FnOnce() -> TransactionOutcome>, +{ + with_transaction(move || -> TransactionOutcome, DispatchError>> { + match f() { + TransactionOutcome::Commit(res) => TransactionOutcome::Commit(Ok(res)), + TransactionOutcome::Rollback(res) => TransactionOutcome::Rollback(Ok(res)), + } + }) + .map_err(|_| ()) +} + /// Same as [`with_transaction`] but without a limit check on nested transactional layers. /// /// This is mostly for backwards compatibility before there was a transactional layer limit. 
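The `SteppedMigration` and `with_transaction_opaque_err` machinery above boils down to three rules: a cursor records progress, each step works within a weight budget, and a cursor of `None` marks completion. A minimal stand-alone sketch of that stepping loop follows (plain Rust; `Meter` is a toy stand-in for `WeightMeter`, and the cursor is a plain index instead of a SCALE-encoded value):

```rust
// Toy model of the multi-step migration flow: migrate a slice in place, a few
// items per "block", resuming from a cursor each time.

struct Meter {
    remaining: u64, // stand-in for the remaining block weight
}

/// Migrate `data` in place, consuming one unit of budget per item.
/// Returns the next cursor, or `None` once every item has been migrated.
fn step(data: &mut [u64], cursor: Option<usize>, meter: &mut Meter) -> Option<usize> {
    let mut i = cursor.unwrap_or(0);
    while i < data.len() && meter.remaining > 0 {
        data[i] *= 2; // the actual "migration" of one item
        meter.remaining -= 1;
        i += 1;
    }
    if i >= data.len() {
        None // done: by contract, `step` will never be called again
    } else {
        Some(i) // resume from here in the next block
    }
}

fn main() {
    let mut data = vec![1, 2, 3, 4, 5];
    let mut blocks = 0;
    // The first call starts with no cursor, exactly like the real API.
    let mut meter = Meter { remaining: 2 };
    let mut cursor = step(&mut data, None, &mut meter);
    blocks += 1;
    // Each further iteration models one block with a fresh weight budget.
    while let Some(c) = cursor {
        let mut meter = Meter { remaining: 2 };
        cursor = step(&mut data, Some(c), &mut meter);
        blocks += 1;
    }
    assert_eq!(data, vec![2, 4, 6, 8, 10]);
    assert_eq!(blocks, 3); // 5 items at 2 per block
}
```

The real `transactional_step` additionally wraps each step in a storage transaction, so a failing step rolls back its partial writes, which is exactly what `with_transaction_opaque_err` above enables without forcing a `From<DispatchError>` bound on the error type.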
diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs
index 3d0429f71b11d6c74f87ef33521634f46b0d591c..1997d8fc223efe7ff1d1be17e2bc10fb973d2cb9 100644
--- a/substrate/frame/support/src/traits.rs
+++ b/substrate/frame/support/src/traits.rs
@@ -59,10 +59,10 @@ pub use misc::{
 	AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128,
 	ConstU16, ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating,
 	DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee,
-	ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType,
-	Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, SameOrOther, Time,
-	TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque,
-	WrapperOpaque,
+	ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsInherent,
+	IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp,
+	SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf,
+	WrapperKeepOpaque, WrapperOpaque,
 };
 #[allow(deprecated)]
 pub use misc::{PreimageProvider, PreimageRecipient};
@@ -86,7 +86,8 @@ mod hooks;
 pub use hooks::GenesisBuild;
 pub use hooks::{
 	BeforeAllRuntimeMigrations, BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis,
-	OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet,
+	OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, OnTimestampSet, PostInherents,
+	PostTransactions, PreInherents,
 };

 pub mod schedule;
diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs
index 20788ce932d8b397990767c6ae8a0d82016cccbd..7d0e5aa1e89ca1d76e81d526881041c09c6e6cf3 100644
--- a/substrate/frame/support/src/traits/hooks.rs
+++ b/substrate/frame/support/src/traits/hooks.rs
@@ -25,10 +25,74 @@
 use crate::weights::Weight;
 use impl_trait_for_tuples::impl_for_tuples;
 use sp_runtime::traits::AtLeast32BitUnsigned;
 use sp_std::prelude::*;
+use sp_weights::WeightMeter;

 #[cfg(feature = "try-runtime")]
 use sp_runtime::TryRuntimeError;

+/// Provides a callback to execute logic before all inherents.
+pub trait PreInherents {
+	/// Called before all inherents are applied but after `on_initialize`.
+	fn pre_inherents() {}
+}
+
+#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
+#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
+#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
+impl PreInherents for Tuple {
+	fn pre_inherents() {
+		for_tuples!( #( Tuple::pre_inherents(); )* );
+	}
+}
+
+/// Provides a callback to execute logic after all inherents.
+pub trait PostInherents {
+	/// Called after all inherents have been applied.
+	fn post_inherents() {}
+}
+
+#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
+#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
+#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
+impl PostInherents for Tuple {
+	fn post_inherents() {
+		for_tuples!( #( Tuple::post_inherents(); )* );
+	}
+}
+
+/// Provides a callback to execute logic after all transactions.
+pub trait PostTransactions {
+	/// Called after all transactions have been applied but before `on_finalize`.
+	fn post_transactions() {}
+}
+
+#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
+#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
+#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
+impl PostTransactions for Tuple {
+	fn post_transactions() {
+		for_tuples!( #( Tuple::post_transactions(); )* );
+	}
+}
+
+/// Periodically executes logic. Is not guaranteed to run within a specific timeframe and should
+/// only be used on logic that has no deadline.
+pub trait OnPoll<BlockNumber> {
+	/// Code to execute every now and then at the beginning of the block after inherent application.
+	///
+	/// The remaining weight limit must be respected.
+	fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {}
+}
+
+#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
+#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
+#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
+impl<BlockNumber: Clone> OnPoll<BlockNumber> for Tuple {
+	fn on_poll(n: BlockNumber, weight: &mut WeightMeter) {
+		for_tuples!( #( Tuple::on_poll(n.clone(), weight); )* );
+	}
+}
+
 /// See [`Hooks::on_initialize`].
 pub trait OnInitialize<BlockNumber> {
 	/// See [`Hooks::on_initialize`].
@@ -90,7 +154,7 @@ impl OnIdle for Tuple {
 ///
 /// Implementing this trait for a pallet let's you express operations that should
 /// happen at genesis. It will be called in an externalities provided environment and
-/// will see the genesis state after all pallets have written their genesis state.
+/// will set the genesis state after all pallets have written their genesis state.
 #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
 #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
 #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
@@ -306,19 +370,23 @@ pub trait IntegrityTest {
 /// end
 /// ```
 ///
-/// * `OnRuntimeUpgrade` is only executed before everything else if a code
-/// * `OnRuntimeUpgrade` is mandatorily at the beginning of the block body (extrinsics) being
-///   processed. change is detected.
-/// * Extrinsics start with inherents, and continue with other signed or unsigned extrinsics.
-/// * `OnIdle` optionally comes after extrinsics.
-///   `OnFinalize` mandatorily comes after `OnIdle`.
+/// * [`OnRuntimeUpgrade`](Hooks::OnRuntimeUpgrade) hooks are only executed when a code change is
+///   detected.
+/// * [`OnRuntimeUpgrade`](Hooks::OnRuntimeUpgrade) hooks are mandatorily executed at the very
+///   beginning of the block body, before any extrinsics are processed.
+/// * [`Inherents`](sp_inherents) are always executed before any other signed or unsigned
+///   extrinsics.
+/// * [`OnIdle`](Hooks::OnIdle) hooks are executed after extrinsics if there is weight remaining in
+///   the block.
+/// * [`OnFinalize`](Hooks::OnFinalize) hooks are mandatorily executed after
+///   [`OnIdle`](Hooks::OnIdle).
///
-/// > `OffchainWorker` is not part of this flow, as it is not really part of the consensus/main
-/// > block import path, and is called optionally, and in other circumstances. See
-/// > [`crate::traits::misc::OffchainWorker`] for more information.
+/// > [`OffchainWorker`](crate::traits::misc::OffchainWorker) hooks are not part of this flow,
+/// > because they are not part of the consensus/main block building logic. See
+/// > [`OffchainWorker`](crate::traits::misc::OffchainWorker) for more information.
 ///
-/// To learn more about the execution of hooks see `frame-executive` as this component is is charge
-/// of dispatching extrinsics and placing the hooks in the correct order.
+/// To learn more about the execution of hooks see the FRAME `Executive` pallet which is in charge
+/// of dispatching extrinsics and calling hooks in the correct order.
 pub trait Hooks<BlockNumber> {
 	/// Block initialization hook. This is called at the very beginning of block execution.
 	///
@@ -370,30 +438,44 @@ pub trait Hooks<BlockNumber> {
 		Weight::zero()
 	}

-	/// Hook executed when a code change (aka. a "runtime upgrade") is detected by FRAME.
+	/// A hook to run logic after inherent application.
+	///
+	/// Is not guaranteed to execute in a block and should therefore only be used in no-deadline
+	/// scenarios.
+	fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {}
+
+	/// Hook executed when a code change (aka. a "runtime upgrade") is detected by the FRAME
+	/// `Executive` pallet.
 	///
 	/// Be aware that this is called before [`Hooks::on_initialize`] of any pallet; therefore, a lot
 	/// of the critical storage items such as `block_number` in system pallet might have not been
-	/// set.
+	/// set yet.
 	///
-	/// Vert similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST
-	/// execute. Use with care.
+	/// Similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST execute.
+	/// It is strongly recommended to dry-run the execution of these hooks using
+	/// [try-runtime-cli](https://github.com/paritytech/try-runtime-cli) to ensure they will not
+	/// produce an overweight block which can brick your chain. Use with care!
 	///
-	/// ## Implementation Note: Versioning
+	/// ## Implementation Note: Standalone Migrations
 	///
-	/// 1. An implementation of this should typically follow a pattern where the version of the
-	/// pallet is checked against the onchain version, and a decision is made about what needs to be
-	/// done. This is helpful to prevent accidental repetitive execution of this hook, which can be
-	/// catastrophic.
+	/// Additional migrations can be created by directly implementing [`OnRuntimeUpgrade`] on
+	/// structs and passing them to `Executive`.
 	///
-	/// Alternatively, [`frame_support::migrations::VersionedMigration`] can be used to assist with
-	/// this.
+	/// ## Implementation Note: Pallet Versioning
 	///
-	/// ## Implementation Note: Runtime Level Migration
+	/// Implementations of this hook are typically wrapped in
+	/// [`crate::migrations::VersionedMigration`] to ensure the migration is executed exactly
+	/// once and only when it is supposed to.
 	///
-	/// Additional "upgrade hooks" can be created by pallets by a manual implementation of
-	/// [`Hooks::on_runtime_upgrade`] which can be passed on to `Executive` at the top level
-	/// runtime.
+	/// Alternatively, developers can manually implement version checks.
+	///
+	/// Failure to adequately check storage versions can result in accidental repetitive execution
+	/// of the hook, which can be catastrophic.
+	///
+	/// ## Implementation Note: Weight
+	///
+	/// Typically, implementations of this method are simple enough that weights can be calculated
+	/// manually. However, if required, a benchmark can also be used.
 	fn on_runtime_upgrade() -> Weight {
 		Weight::zero()
 	}
@@ -403,7 +485,7 @@ pub trait Hooks<BlockNumber> {
 	/// It should focus on certain checks to ensure that the state is sensible. This is never
 	/// executed in a consensus code-path, therefore it can consume as much weight as it needs.
/// - /// This hook should not alter any storage. + /// This hook must not alter any storage. #[cfg(feature = "try-runtime")] fn try_state(_n: BlockNumber) -> Result<(), TryRuntimeError> { Ok(()) @@ -415,7 +497,7 @@ pub trait Hooks { /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector /// should be returned if there is no such need. /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + /// This hook is never executed on-chain but instead used by testing tools. #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) @@ -434,7 +516,7 @@ pub trait Hooks { } /// Implementing this function on a pallet allows you to perform long-running tasks that are - /// dispatched as separate threads, and entirely independent of the main wasm runtime. + /// dispatched as separate threads, and entirely independent of the main blockchain execution. /// /// This function can freely read from the state, but any change it makes to the state is /// meaningless. Writes can be pushed back to the chain by submitting extrinsics from the diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 995ac4f717911195e4dba202d350c6cbc09ae340..f3d893bcc1d899fce06ef00a38d687bfb3ea0181 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -123,6 +123,8 @@ impl ServiceQueues for NoopServiceQueues { pub struct QueueFootprint { /// The number of pages in the queue (including overweight pages). pub pages: u32, + /// The number of pages that are ready (not yet processed and also not overweight). + pub ready_pages: u32, /// The storage footprint of the queue (including overweight messages). pub storage: Footprint, } diff --git a/substrate/frame/support/src/traits/metadata.rs b/substrate/frame/support/src/traits/metadata.rs index 586af20511a89f82caf33297177a12f6d0988bf4..8bda4186bc967b29a6342ea96dd0a1cdc5072438 100644 --- a/substrate/frame/support/src/traits/metadata.rs +++ b/substrate/frame/support/src/traits/metadata.rs @@ -261,41 +261,64 @@ impl Add for StorageVersion { } } -/// Special marker struct if no storage version is set for a pallet. +/// Special marker struct used when [`storage_version`](crate::pallet_macros::storage_version) is +/// not defined for a pallet. /// /// If you (the reader) end up here, it probably means that you tried to compare /// [`GetStorageVersion::on_chain_storage_version`] against -/// [`GetStorageVersion::current_storage_version`]. This basically means that the -/// [`storage_version`](crate::pallet_macros::storage_version) is missing in the pallet where the -/// mentioned functions are being called. +/// [`GetStorageVersion::in_code_storage_version`]. This basically means that the +/// [`storage_version`](crate::pallet_macros::storage_version) is missing from the pallet where the +/// mentioned functions are being called, and needs to be defined. #[derive(Debug, Default)] pub struct NoStorageVersionSet; -/// Provides information about the storage version of a pallet. +/// Provides information about a pallet's storage versions. /// -/// It differentiates between current and on-chain storage version. Both should be only out of sync -/// when a new runtime upgrade was applied and the runtime migrations did not yet executed. -/// Otherwise it means that the pallet works with an unsupported storage version and unforeseen -/// stuff can happen. 
+/// Every pallet has two storage versions:
+/// 1. An in-code storage version
+/// 2. An on-chain storage version
 ///
-/// The current storage version is the version of the pallet as supported at runtime. The active
-/// storage version is the version of the pallet in the storage.
+/// The in-code storage version is the version of the pallet as defined in the runtime blob, and
+/// the on-chain storage version is the version of the pallet stored on-chain.
 ///
-/// It is required to update the on-chain storage version manually when a migration was applied.
+/// Storage versions should only ever be out of sync when a pallet has been updated to a new
+/// version and the in-code version is incremented, but the migration has not yet been executed
+/// on-chain as part of a runtime upgrade.
+///
+/// It is the responsibility of the developer to ensure that the on-chain storage version is set
+/// correctly during a migration so that it matches the in-code storage version.
 pub trait GetStorageVersion {
-	/// This will be filled out by the [`pallet`](crate::pallet) macro.
+	/// This type is generated by the [`pallet`](crate::pallet) macro.
+	///
+	/// If the [`storage_version`](crate::pallet_macros::storage_version) attribute isn't specified,
+	/// this is set to [`NoStorageVersionSet`] to signify that it is missing.
+	///
+	/// If the [`storage_version`](crate::pallet_macros::storage_version) attribute is specified,
+	/// this will be set to a [`StorageVersion`] corresponding to the attribute.
 	///
-	/// If the [`storage_version`](crate::pallet_macros::storage_version) attribute isn't given
-	/// this is set to [`NoStorageVersionSet`] to inform the user that the attribute is missing.
-	/// This should prevent that the user forgets to set a storage version when required. However,
-	/// this will only work when the user actually tries to call [`Self::current_storage_version`]
-	/// to compare it against the [`Self::on_chain_storage_version`]. If the attribute is given,
-	/// this will be set to [`StorageVersion`].
-	type CurrentStorageVersion;
-
-	/// Returns the current storage version as supported by the pallet.
-	fn current_storage_version() -> Self::CurrentStorageVersion;
-	/// Returns the on-chain storage version of the pallet as stored in the storage.
+	/// The intention of using [`NoStorageVersionSet`] instead of defaulting to a [`StorageVersion`]
+	/// of zero is to prevent developers from forgetting to set
+	/// [`storage_version`](crate::pallet_macros::storage_version) when it is required, like in the
+	/// case that they wish to compare the in-code storage version to the on-chain storage version.
+	type InCodeStorageVersion;
+
+	#[deprecated(
+		note = "This method has been renamed to `in_code_storage_version` and will be removed after March 2024."
+	)]
+	/// DEPRECATED: Use [`Self::in_code_storage_version`] instead.
+	///
+	/// Returns the in-code storage version as specified in the
+	/// [`storage_version`](crate::pallet_macros::storage_version) attribute, or
+	/// [`NoStorageVersionSet`] if the attribute is missing.
+	fn current_storage_version() -> Self::InCodeStorageVersion {
+		Self::in_code_storage_version()
+	}
+
+	/// Returns the in-code storage version as specified in the
+	/// [`storage_version`](crate::pallet_macros::storage_version) attribute, or
+	/// [`NoStorageVersionSet`] if the attribute is missing.
+	fn in_code_storage_version() -> Self::InCodeStorageVersion;
+	/// Returns the storage version of the pallet as last set in the actual on-chain storage.
fn on_chain_storage_version() -> StorageVersion; } diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index eafd9c8abdd2dfc4b4174723678d94aa963ad738..1f634a642829c1d6a3e72e0e76ef25bdf2bef55a 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -23,6 +23,7 @@ use impl_trait_for_tuples::impl_for_tuples; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, One, Saturating}; use sp_core::bounded::bounded_vec::TruncateFrom; + #[doc(hidden)] pub use sp_runtime::traits::{ ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, @@ -895,11 +896,21 @@ pub trait GetBacking { /// A trait to ensure the inherent are before non-inherent in a block. /// /// This is typically implemented on runtime, through `construct_runtime!`. -pub trait EnsureInherentsAreFirst { +pub trait EnsureInherentsAreFirst: + IsInherent<::Extrinsic> +{ /// Ensure the position of inherent is correct, i.e. they are before non-inherents. /// - /// On error return the index of the inherent with invalid position (counting from 0). - fn ensure_inherents_are_first(block: &Block) -> Result<(), u32>; + /// On error return the index of the inherent with invalid position (counting from 0). On + /// success it returns the index of the last inherent. `0` therefore means that there are no + /// inherents. + fn ensure_inherents_are_first(block: &Block) -> Result; +} + +/// A trait to check if an extrinsic is an inherent. +pub trait IsInherent { + /// Whether this extrinsic is an inherent. + fn is_inherent(ext: &Extrinsic) -> bool; } /// An extrinsic on which we can get access to call. 
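The reworked `EnsureInherentsAreFirst` contract above (on error, the 0-based index of a misplaced inherent; on success, a value where `0` means there are no inherents) can be sketched as follows. This is an illustration of our reading of the doc comment, not the actual `construct_runtime!` expansion; the `is_inherent` closure stands in for the runtime's generated `IsInherent` implementation:

```rust
// Sketch: verify that all inherents in a block precede the non-inherent extrinsics.
fn ensure_inherents_are_first<E>(
    extrinsics: &[E],
    is_inherent: impl Fn(&E) -> bool,
) -> Result<u32, u32> {
    let mut first_non_inherent = None;
    let mut num_leading_inherents = 0u32;
    for (i, ext) in extrinsics.iter().enumerate() {
        if is_inherent(ext) {
            if first_non_inherent.is_some() {
                // An inherent after a non-inherent: report its position (counting from 0).
                return Err(i as u32);
            }
            num_leading_inherents = i as u32 + 1;
        } else if first_non_inherent.is_none() {
            first_non_inherent = Some(i);
        }
    }
    // Per the doc comment, returning 0 here means the block has no inherents.
    Ok(num_leading_inherents)
}

fn main() {
    // 'i' = inherent, 't' = transaction
    assert_eq!(ensure_inherents_are_first(&['i', 'i', 't'], |e| *e == 'i'), Ok(2));
    assert_eq!(ensure_inherents_are_first(&['t', 't'], |e| *e == 'i'), Ok(0));
    assert_eq!(ensure_inherents_are_first(&['i', 't', 'i'], |e| *e == 'i'), Err(2));
}
```

Splitting the check into a reusable `IsInherent` trait, as the diff does, is what lets other components (such as the `PostInherents` callback above) find the inherent/transaction boundary without re-deriving it.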
diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 1dbf46109c87787945c2bc23350a05a242f991fb..2f12cc00ed9e08560870e6f62b1d1930bad90832 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" -serde = { version = "1.0.196", default-features = false, features = ["derive"] } +serde = { features = ["derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } @@ -24,7 +24,7 @@ sp-api = { path = "../../../primitives/api", default-features = false } sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } sp-io = { path = "../../../primitives/io", default-features = false } sp-state-machine = { path = "../../../primitives/state-machine", optional = true } -frame-support = { path = "..", default-features = false } +frame-support = { path = "..", default-features = false, features = ["experimental"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } sp-runtime = { path = "../../../primitives/runtime", default-features = false } sp-core = { path = "../../../primitives/core", default-features = false } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index 94afab76d89ee58db04f6b8c6074d4e843ee4057..ca889faef876b197cb15a338e0a65b30536b79ec 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["derive"] } +serde = { features = ["derive"], workspace = true } frame-support = { path = "../..", default-features = false } frame-system = { path = "../../../system", default-features = false } sp-runtime = { path = "../../../../primitives/runtime", default-features = false } diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 61cfc07caedc37e1a8427e8fadc61406813c52e6..09c4d290ef5c3d49d244648242e6324d4cf760e5 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -347,7 +347,7 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied 26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, | ^^^^^^ the trait `Config` is not implemented for `Runtime` | -note: required by a bound in `frame_system::GenesisConfig` +note: required by a bound in `GenesisConfig` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub struct GenesisConfig { diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr index 
3b6329c650fa65208e01c7d991e321f256b7eecf..6160f8234a350cc3ee0a0761cab0eed6ea022525 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr @@ -4,88 +4,24 @@ error: The number of pallets exceeds the maximum number of tuple elements. To in 67 | pub struct Runtime | ^^^ -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:35:64 - | -35 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | ^^^^^^^^^^^ not found in this scope - | -help: you might be missing a type parameter - | -35 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | +++++++++++++ - -error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:37:25 - | -37 | impl pallet::Config for Runtime {} - | ^^^^^^^ not found in this scope - -error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:40:31 - | -40 | impl frame_system::Config for Runtime { - | ^^^^^^^ not found in this scope - -error[E0412]: cannot find type `RuntimeOrigin` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:42:23 - | -42 | type RuntimeOrigin = RuntimeOrigin; - | ^^^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -42 | type RuntimeOrigin = Self::RuntimeOrigin; - | ++++++ - -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:44:21 - | -44 | type RuntimeCall = RuntimeCall; - | ^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -44 | type RuntimeCall = Self::RuntimeCall; - | ++++++ - -error[E0412]: cannot find type `RuntimeEvent` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:50:22 - | -50 | type RuntimeEvent = RuntimeEvent; - | ^^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -50 | type RuntimeEvent = Self::RuntimeEvent; - | ++++++ - -error[E0412]: cannot find type `PalletInfo` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:56:20 - | -56 | type PalletInfo = PalletInfo; - | ^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -56 | type PalletInfo = Self::PalletInfo; - | ++++++ -help: consider importing one of these items - | -18 + use frame_benchmarking::__private::traits::PalletInfo; - | -18 + use frame_support::traits::PalletInfo; - | - -error[E0412]: cannot find type `RuntimeTask` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:1 - | -39 | #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the macro `frame_system::config_preludes::TestDefaultConfig` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info) -help: you might have meant to use the associated type - --> $WORKSPACE/substrate/frame/system/src/lib.rs - | - | type Self::RuntimeTask = (); - | ++++++ +error: recursion limit reached while expanding `frame_support::__private::tt_return!` + --> 
tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:22:1 + | +22 | / #[frame_support::pallet] +23 | | mod pallet { +24 | | #[pallet::config] +25 | | pub trait Config: frame_system::Config {} +... | +66 | |/ construct_runtime! { +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +... || +180 | || } +181 | || } + | ||_^ + | |_| + | in this macro invocation + | + = help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`$CRATE`) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index a4c7ecf786588a782f96b3ae890c67973c2841fc..30005c07cb631dd48425117507006faa870866cd 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -53,8 +53,9 @@ error[E0599]: no function or associated item named `is_inherent` found for struc | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope - = note: the following trait defines an item `is_inherent`, perhaps you need to implement it: + = note: the following traits define an item `is_inherent`, perhaps you need to implement one of them: candidate #1: `ProvideInherent` + candidate #2: `IsInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope @@ -119,3 +120,23 @@ error[E0599]: no function or associated item named `is_inherent_required` found = note: the following trait defines an item `is_inherent_required`, perhaps you need to implement it: candidate #1: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `pallet::Pallet: ProvideInherent` is not satisfied + --> tests/construct_runtime_ui/undefined_inherent_part.rs:70:3 + | +70 | Pallet: pallet expanded::{}::{Pallet, Inherent}, + | ^^^^^^ the trait `ProvideInherent` is not implemented for `pallet::Pallet` + +error[E0277]: the trait bound `pallet::Pallet: ProvideInherent` is not satisfied + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 + | +66 | / construct_runtime! 
{ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } + | |_^ the trait `ProvideInherent` is not implemented for `pallet::Pallet` + | + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 9b4381c2f82bd4baa78fc8d175079a0e90480da3..607f54ccc131d2f589baa21d786db50fdea5460b 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -209,7 +209,7 @@ pub mod pallet { where T::AccountId: From + From + SomeAssociation1, { - /// Doc comment put in metadata + /// call foo doc comment put in metadata #[pallet::call_index(0)] #[pallet::weight(Weight::from_parts(*foo as u64, 0))] pub fn foo( @@ -225,7 +225,7 @@ pub mod pallet { Ok(().into()) } - /// Doc comment put in metadata + /// call foo_storage_layer doc comment put in metadata #[pallet::call_index(1)] #[pallet::weight({1})] pub fn foo_storage_layer( @@ -270,7 +270,7 @@ pub mod pallet { #[pallet::error] #[derive(PartialEq, Eq)] pub enum Error { - /// doc comment put into metadata + /// error doc comment put in metadata InsufficientProposersBalance, NonExistentStorageValue, Code(u8), @@ -287,9 +287,8 @@ pub mod pallet { where T::AccountId: SomeAssociation1 + From, { - /// doc comment put in metadata + /// event doc comment put in metadata Proposed(::AccountId), - /// doc Spending(BalanceOf), Something(u32), SomethingElse(::_1), @@ -590,7 +589,7 @@ pub mod pallet2 { Self::deposit_event(Event::Something(31)); if UpdateStorageVersion::get() { - Self::current_storage_version().put::(); + Self::in_code_storage_version().put::(); } Weight::zero() @@ -750,8 +749,7 @@ pub type UncheckedExtrinsic = sp_runtime::testing::TestXt>; frame_support::construct_runtime!( - pub struct Runtime - { + pub struct Runtime { // Exclude part `Storage` in order not to check its metadata in tests. 
System: frame_system exclude_parts { Pallet, Storage }, Example: pallet, @@ -772,6 +770,14 @@ fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { } } +fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { + if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + doc + } +} + #[test] fn transactional_works() { TestExternalities::default().execute_with(|| { @@ -1310,7 +1316,7 @@ fn pallet_on_genesis() { assert_eq!(pallet::Pallet::::on_chain_storage_version(), StorageVersion::new(0)); pallet::Pallet::::on_genesis(); assert_eq!( - pallet::Pallet::::current_storage_version(), + pallet::Pallet::::in_code_storage_version(), pallet::Pallet::::on_chain_storage_version(), ); }) @@ -1362,19 +1368,47 @@ fn migrate_from_pallet_version_to_storage_version() { }); } +#[test] +fn pallet_item_docs_in_metadata() { + // call + let call_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(call_variants[0].docs, maybe_docs(vec!["call foo doc comment put in metadata"])); + assert_eq!( + call_variants[1].docs, + maybe_docs(vec!["call foo_storage_layer doc comment put in metadata"]) + ); + assert!(call_variants[2].docs.is_empty()); + + // event + let event_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(event_variants[0].docs, maybe_docs(vec!["event doc comment put in metadata"])); + assert!(event_variants[1].docs.is_empty()); + + // error + let error_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(error_variants[0].docs, maybe_docs(vec!["error doc comment put in metadata"])); + assert!(error_variants[1].docs.is_empty()); + + // storage is already covered in the main `fn metadata` test. +} + #[test] fn metadata() { use codec::Decode; use frame_metadata::{v15::*, *}; - fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { - if cfg!(feature = "no-metadata-docs") { - vec![] - } else { - doc - } - } - let readme = "Support code for the runtime.\n\nLicense: Apache-2.0\n"; let expected_pallet_doc = vec![" Pallet documentation", readme, readme]; @@ -2257,10 +2291,10 @@ fn pallet_on_chain_storage_version_initializes_correctly() { AllPalletsWithSystem, >; - // Simple example of a pallet with current version 10 being added to the runtime for the first + // Simple example of a pallet with in-code version 10 being added to the runtime for the first // time. TestExternalities::default().execute_with(|| { - let current_version = Example::current_storage_version(); + let in_code_version = Example::in_code_storage_version(); // Check the pallet has no storage items set. let pallet_hashed_prefix = twox_128(Example::name().as_bytes()); @@ -2271,14 +2305,14 @@ fn pallet_on_chain_storage_version_initializes_correctly() { // version. Executive::execute_on_runtime_upgrade(); - // Check that the storage version was initialized to the current version + // Check that the storage version was initialized to the in-code version let on_chain_version_after = StorageVersion::get::(); - assert_eq!(on_chain_version_after, current_version); + assert_eq!(on_chain_version_after, in_code_version); }); - // Pallet with no current storage version should have the on-chain version initialized to 0. 
+ // Pallet with no in-code storage version should have the on-chain version initialized to 0. TestExternalities::default().execute_with(|| { - // Example4 current_storage_version is NoStorageVersionSet. + // Example4 in_code_storage_version is NoStorageVersionSet. // Check the pallet has no storage items set. let pallet_hashed_prefix = twox_128(Example4::name().as_bytes()); @@ -2308,7 +2342,7 @@ fn post_runtime_upgrade_detects_storage_version_issues() { impl OnRuntimeUpgrade for CustomUpgrade { fn on_runtime_upgrade() -> Weight { - Example2::current_storage_version().put::(); + Example2::in_code_storage_version().put::(); Default::default() } @@ -2351,14 +2385,14 @@ fn post_runtime_upgrade_detects_storage_version_issues() { >; TestExternalities::default().execute_with(|| { - // Set the on-chain version to one less than the current version for `Example`, simulating a + // Set the on-chain version to one less than the in-code version for `Example`, simulating a // forgotten migration StorageVersion::new(9).put::(); // The version isn't changed, we should detect it. assert!( Executive::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost).unwrap_err() == - "On chain and current storage version do not match. Missing runtime upgrade?" + "On chain and in-code storage version do not match. Missing runtime upgrade?" .into() ); }); diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs index 79e9d6786717a5a27717cda754575c2ecd478aa3..33b96dea9486396629e19274de8437f0ca3c3162 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -90,6 +90,7 @@ fn module_error_outer_enum_expand_explicit() { frame_system::Error::NonDefaultComposite => (), frame_system::Error::NonZeroRefCount => (), frame_system::Error::CallFiltered => (), + frame_system::Error::MultiBlockMigrationsOngoing => (), #[cfg(feature = "experimental")] frame_system::Error::InvalidTask => (), #[cfg(feature = "experimental")] diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 4bd8ee0bb39a574b2ff3f59b571c1209fc675da4..db006fe79359aa9f096963ce9333933329c23508 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -90,6 +90,7 @@ fn module_error_outer_enum_expand_implicit() { frame_system::Error::NonDefaultComposite => (), frame_system::Error::NonZeroRefCount => (), frame_system::Error::CallFiltered => (), + frame_system::Error::MultiBlockMigrationsOngoing => (), #[cfg(feature = "experimental")] frame_system::Error::InvalidTask => (), #[cfg(feature = "experimental")] diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs index 840a6dee20ccc7a836619a1ef334cca6e22e7887..d2ca9fc80991a87dbb6f20323f07d8adcd29449d 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs @@ -29,7 +29,7 @@ mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_runtime_upgrade() -> Weight { - if Self::current_storage_version() != Self::on_chain_storage_version() { + if Self::in_code_storage_version() != Self::on_chain_storage_version() { } diff --git 
a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr index 1b48197cc9ed1049ea0f52bb011fbc95654c59c0..3256e69528a2d28527ccfe1fa4d77dadfc77baa1 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr @@ -1,7 +1,7 @@ error[E0369]: binary operation `!=` cannot be applied to type `NoStorageVersionSet` --> tests/pallet_ui/compare_unset_storage_version.rs:32:39 | -32 | if Self::current_storage_version() != Self::on_chain_storage_version() { +32 | if Self::in_code_storage_version() != Self::on_chain_storage_version() { | ------------------------------- ^^ -------------------------------- StorageVersion | | | NoStorageVersionSet diff --git a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr deleted file mode 100644 index e227033d3646bac74b23267531730e0a59ce06fb..0000000000000000000000000000000000000000 --- a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr +++ /dev/null @@ -1,10 +0,0 @@ -error: use of deprecated struct `pallet::_::Store`: - Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023. - Check https://github.com/paritytech/substrate/pull/13535 for more details. - --> tests/pallet_ui/deprecated_store_attr.rs:24:3 - | -24 | #[pallet::generate_store(trait Store)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: `-D deprecated` implied by `-D warnings` - = help: to override `-D warnings` add `#[allow(deprecated)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs b/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs deleted file mode 100644 index 334fd8a46af2e14ef175acfbbdbc6a895b890595..0000000000000000000000000000000000000000 --- a/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs +++ /dev/null @@ -1,42 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-#[frame_support::pallet]
-mod pallet {
-	use frame_support::pallet_prelude::Hooks;
-	use frame_system::pallet_prelude::BlockNumberFor;
-	use frame_support::pallet_prelude::StorageValue;
-
-	#[pallet::config]
-	pub trait Config: frame_system::Config {}
-
-	#[pallet::pallet]
-	#[pallet::generate_store(trait Store)]
-	#[pallet::generate_store(trait Store)]
-	pub struct Pallet<T>(core::marker::PhantomData<T>);
-
-	#[pallet::hooks]
-	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
-
-	#[pallet::call]
-	impl<T: Config> Pallet<T> {}
-
-	#[pallet::storage]
-	type Foo<T> = StorageValue<_, u8>;
-}
-
-fn main() {}
diff --git a/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr
deleted file mode 100644
index 864b399326e19ea96afa8d76ccf9815b057ceef6..0000000000000000000000000000000000000000
--- a/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr
+++ /dev/null
@@ -1,5 +0,0 @@
-error: Unexpected duplicated attribute
-  --> tests/pallet_ui/duplicate_store_attr.rs:29:3
-   |
-29 | 	#[pallet::generate_store(trait Store)]
-   | 	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/substrate/frame/support/test/tests/pallet_ui/pallet_struct_invalid_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/pallet_struct_invalid_attr.stderr
index 33a2d1da78608c7d97e433d03c403cf4559a79b9..6f7f5617f7e5a8b40dc5e14f8684ccc256be1994 100644
--- a/substrate/frame/support/test/tests/pallet_ui/pallet_struct_invalid_attr.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/pallet_struct_invalid_attr.stderr
@@ -1,4 +1,4 @@
-error: expected one of: `generate_store`, `without_storage_info`, `storage_version`
+error: expected `without_storage_info` or `storage_version`
   --> tests/pallet_ui/pallet_struct_invalid_attr.rs:24:12
    |
 24 | 	#[pallet::generate_storage_info] // invalid
diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr
index 7f125526edf26abddf82f2228da9a510200f0735..519fadaa6049cb26430719379d3f9847b81efb62 100644
--- a/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr
@@ -1,4 +1,4 @@
-error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage`
+error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage`, `disable_try_decode_storage`
   --> tests/pallet_ui/storage_invalid_attribute.rs:33:12
    |
 33 | 	#[pallet::generate_store(pub trait Store)]
diff --git a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr b/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr
deleted file mode 100644
index ccb55122e8169e84f776dd41504738ab0f1772f5..0000000000000000000000000000000000000000
--- a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr
+++ /dev/null
@@ -1,19 +0,0 @@
-error: use of deprecated struct `pallet::_::Store`:
-       Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023.
-       Check https://github.com/paritytech/substrate/pull/13535 for more details.
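With `#[pallet::generate_store]` removed (the fixtures deleted above and below), storage is addressed through the generated storage types instead of a `Store` trait; a sketch of the surviving idiom, with `Runtime` as a placeholder runtime type:

```rust
// Sketch of the post-`generate_store` idiom; not taken from the patch.
#[frame_support::pallet]
mod pallet {
	use frame_support::pallet_prelude::*;

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	#[pallet::pallet]
	pub struct Pallet<T>(core::marker::PhantomData<T>);

	#[pallet::storage]
	pub type Foo<T> = StorageValue<_, u8>;
}

// Access goes straight through the storage type:
// pallet::Foo::<Runtime>::put(1);
// let v = pallet::Foo::<Runtime>::get();
```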
- --> tests/pallet_ui/store_trait_leak_private.rs:28:3 - | -28 | #[pallet::generate_store(pub trait Store)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: `-D deprecated` implied by `-D warnings` - = help: to override `-D warnings` add `#[allow(deprecated)]` - -error[E0446]: private type `_GeneratedPrefixForStorageFoo` in public interface - --> tests/pallet_ui/store_trait_leak_private.rs:28:37 - | -28 | #[pallet::generate_store(pub trait Store)] - | ^^^^^ can't leak private type -... -37 | #[pallet::storage] - | ------- `_GeneratedPrefixForStorageFoo` declared as private diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/runtime.rs similarity index 89% rename from substrate/frame/support/test/tests/construct_runtime.rs rename to substrate/frame/support/test/tests/runtime.rs index b8341b25cb0985915706ccb68b870474118d33cb..115e3aa4c8a7e9429b3e7e4a7bc9dbba849b5b49 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -30,7 +30,7 @@ use scale_info::TypeInfo; use sp_core::{sr25519, ConstU64}; use sp_runtime::{ generic, - traits::{BlakeTwo256, Verify}, + traits::{BlakeTwo256, ValidateUnsigned, Verify}, DispatchError, ModuleError, }; use sp_version::RuntimeVersion; @@ -176,6 +176,17 @@ mod nested { impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call, + ) -> TransactionValidity { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } } } @@ -247,6 +258,20 @@ pub mod module3 { impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } + + #[pallet::storage] + pub type Storage = StorageValue<_, u32>; + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call, + ) -> TransactionValidity { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } } pub type BlockNumber = u64; @@ -256,24 +281,64 @@ pub type Header = generic::Header; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; pub type Block = generic::Block; -frame_support::construct_runtime!( - pub struct Runtime - { - System: frame_system::{Pallet, Call, Event, Origin} = 30, - Module1_1: module1::::{Pallet, Call, Storage, Event, Origin}, - Module2: module2::{Pallet, Call, Storage, Event, Origin}, - Module1_2: module1::::{Pallet, Call, Storage, Event, Origin}, - NestedModule3: nested::module3::{Pallet, Call, Config, Storage, Event, Origin}, - Module3: self::module3::{Pallet, Call, Config, Storage, Event, Origin}, - Module1_3: module1::::{Pallet, Storage, Event } = 6, - Module1_4: module1::::{Pallet, Call, Event } = 3, - Module1_5: module1::::{Pallet, Event}, - Module1_6: module1::::{Pallet, Call, Storage, Event, Origin} = 1, - Module1_7: module1::::{Pallet, Call, Storage, Event, Origin}, - Module1_8: module1::::{Pallet, Call, Storage, Event, Origin} = 12, - Module1_9: module1::::{Pallet, Call, Storage, Event, Origin}, - } -); +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(30)] + pub type System = frame_system + Pallet + Call + 
Event + Origin; + + #[runtime::pallet_index(31)] + pub type Module1_1 = module1; + + #[runtime::pallet_index(32)] + pub type Module2 = module2; + + #[runtime::pallet_index(33)] + pub type Module1_2 = module1; + + #[runtime::pallet_index(34)] + pub type NestedModule3 = nested::module3; + + #[runtime::pallet_index(35)] + #[runtime::disable_unsigned] + pub type Module3 = self::module3; + + #[runtime::pallet_index(6)] + #[runtime::disable_call] + pub type Module1_3 = module1; + + #[runtime::pallet_index(3)] + pub type Module1_4 = module1; + + #[runtime::pallet_index(4)] + #[runtime::disable_call] + pub type Module1_5 = module1; + + #[runtime::pallet_index(1)] + pub type Module1_6 = module1; + + #[runtime::pallet_index(2)] + pub type Module1_7 = module1; + + #[runtime::pallet_index(12)] + pub type Module1_8 = module1; + + #[runtime::pallet_index(13)] + pub type Module1_9 = module1; +} #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { @@ -605,17 +670,17 @@ fn get_module_names() { let module_names = RuntimeCall::get_module_names(); assert_eq!( [ + "Module1_6", + "Module1_7", + "Module1_4", + "Module1_8", + "Module1_9", "System", "Module1_1", "Module2", "Module1_2", "NestedModule3", "Module3", - "Module1_4", - "Module1_6", - "Module1_7", - "Module1_8", - "Module1_9", ], module_names ); @@ -636,9 +701,13 @@ fn call_subtype_conversion() { #[test] fn test_metadata() { - use frame_metadata::{v14::*, *}; + use frame_metadata::{ + v14::{StorageEntryType::Plain, *}, + *, + }; use scale_info::meta_type; use sp_core::Encode; + use sp_metadata_ir::StorageEntryModifierIR::Optional; fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { if cfg!(feature = "no-metadata-docs") { @@ -649,6 +718,69 @@ fn test_metadata() { } let pallets = vec![ + PalletMetadata { + name: "Module1_6", + storage:None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 1, + }, + PalletMetadata { + name: "Module1_7", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 2, + }, + PalletMetadata { + name: "Module1_4", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 3, + }, + PalletMetadata { + name: "Module1_5", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 4, + }, + PalletMetadata { + name: "Module1_3", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 6, + }, + PalletMetadata { + name: "Module1_8", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 12, + }, + PalletMetadata { + name: "Module1_9", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 13, + }, PalletMetadata { name: "System", storage: None, @@ -683,7 +815,7 @@ fn test_metadata() { name: "Version", ty: meta_type::(), value: RuntimeVersion::default().encode(), - docs: maybe_docs(vec![ " Get the chain's current version."]), + docs: maybe_docs(vec![ " Get the chain's in-code version."]), 
}, PalletConstantMetadata { name: "SS58Prefix", @@ -703,7 +835,7 @@ fn test_metadata() { }, PalletMetadata { name: "Module1_1", - storage: Some(PalletStorageMetadata { prefix: "Module1_1", entries: vec![] }), + storage: None, calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], @@ -712,7 +844,7 @@ fn test_metadata() { }, PalletMetadata { name: "Module2", - storage: Some(PalletStorageMetadata { prefix: "Module2", entries: vec![] }), + storage: None, calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], @@ -721,7 +853,7 @@ fn test_metadata() { }, PalletMetadata { name: "Module1_2", - storage: Some(PalletStorageMetadata { prefix: "Module1_2", entries: vec![] }), + storage: None, calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], @@ -730,7 +862,7 @@ fn test_metadata() { }, PalletMetadata { name: "NestedModule3", - storage: Some(PalletStorageMetadata { prefix: "NestedModule3", entries: vec![] }), + storage: None, calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], @@ -739,76 +871,24 @@ fn test_metadata() { }, PalletMetadata { name: "Module3", - storage: Some(PalletStorageMetadata { prefix: "Module3", entries: vec![] }), + storage: Some(PalletStorageMetadata { + prefix: "Module3", + entries: vec![ + StorageEntryMetadata { + name: "Storage", + modifier: Optional.into(), + ty: Plain(meta_type::().into()), + default: vec![0], + docs: vec![], + }, + ] + }), calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], error: Some(meta_type::>().into()), index: 35, }, - PalletMetadata { - name: "Module1_3", - storage: Some(PalletStorageMetadata { prefix: "Module1_3", entries: vec![] }), - calls: None, - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 6, - }, - PalletMetadata { - name: "Module1_4", - storage: None, - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 3, - }, - PalletMetadata { - name: "Module1_5", - storage: None, - calls: None, - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 4, - }, - PalletMetadata { - name: "Module1_6", - storage: Some(PalletStorageMetadata { prefix: "Module1_6", entries: vec![] }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 1, - }, - PalletMetadata { - name: "Module1_7", - storage: Some(PalletStorageMetadata { prefix: "Module1_7", entries: vec![] }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 2, - }, - PalletMetadata { - name: "Module1_8", - storage: Some(PalletStorageMetadata { prefix: "Module1_8", entries: vec![] }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 12, - }, - PalletMetadata { - name: "Module1_9", - storage: Some(PalletStorageMetadata { prefix: "Module1_9", entries: vec![] }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 13, - }, ]; let extrinsic = ExtrinsicMetadata { @@ -895,3 +975,19 @@ fn pallet_in_runtime_is_correct() { assert_eq!(PalletInfo::module_name::().unwrap(), 
"module1"); assert!(PalletInfo::crate_version::().is_some()); } + +#[test] +fn test_validate_unsigned() { + use frame_support::pallet_prelude::*; + + let call = RuntimeCall::NestedModule3(nested::module3::Call::fail {}); + let validity = Runtime::validate_unsigned(TransactionSource::Local, &call).unwrap_err(); + assert_eq!(validity, TransactionValidityError::Invalid(InvalidTransaction::Call)); + + let call = RuntimeCall::Module3(module3::Call::fail {}); + let validity = Runtime::validate_unsigned(TransactionSource::Local, &call).unwrap_err(); + assert_eq!( + validity, + TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator) + ); +} diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs new file mode 100644 index 0000000000000000000000000000000000000000..4c7012dca14979f0ed7f560bdc339146a676caa3 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -0,0 +1,993 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! General tests for construct_runtime macro, test for: +//! * error declared with decl_error works +//! * integrity test is generated + +#![recursion_limit = "128"] + +use codec::MaxEncodedLen; +use frame_support::{ + derive_impl, parameter_types, traits::PalletInfo as _, weights::RuntimeDbWeight, +}; +use frame_system::limits::{BlockLength, BlockWeights}; +use scale_info::TypeInfo; +use sp_core::{sr25519, ConstU64}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, ValidateUnsigned, Verify}, + DispatchError, ModuleError, +}; +use sp_version::RuntimeVersion; + +parameter_types! 
{ + pub static IntegrityTestExec: u32 = 0; +} + +#[frame_support::pallet(dev_mode)] +mod module1 { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::call] + impl, I: 'static> Pallet { + pub fn fail(_origin: OriginFor) -> DispatchResult { + Err(Error::::Something.into()) + } + } + + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + #[scale_info(skip_type_params(I))] + pub struct Origin(pub PhantomData<(T, I)>); + + #[pallet::event] + pub enum Event, I: 'static = ()> { + A(::AccountId), + } + + #[pallet::error] + pub enum Error { + Something, + } +} + +#[frame_support::pallet(dev_mode)] +mod module2 { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + IntegrityTestExec::mutate(|i| *i += 1); + } + } + + #[pallet::call] + impl Pallet { + pub fn fail(_origin: OriginFor) -> DispatchResult { + Err(Error::::Something.into()) + } + } + + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct Origin; + + #[pallet::event] + pub enum Event { + A, + } + + #[pallet::error] + pub enum Error { + Something, + } +} + +mod nested { + use super::*; + + #[frame_support::pallet(dev_mode)] + pub mod module3 { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + IntegrityTestExec::mutate(|i| *i += 1); + } + } + + #[pallet::call] + impl Pallet { + pub fn fail(_origin: OriginFor) -> DispatchResult { + Err(Error::::Something.into()) + } + } + + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct Origin; + + #[pallet::event] + pub enum Event { + A, + } + + #[pallet::error] + pub enum Error { + Something, + } + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call, + ) -> TransactionValidity { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } + } +} + +#[frame_support::pallet(dev_mode)] +pub mod module3 { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::call] + impl Pallet { + pub fn fail(_origin: OriginFor) -> DispatchResult { + Err(Error::::Something.into()) + } + pub fn aux_1(_origin: OriginFor, #[pallet::compact] _data: u32) -> 
DispatchResult { + unreachable!() + } + pub fn aux_2( + _origin: OriginFor, + _data: i32, + #[pallet::compact] _data2: u32, + ) -> DispatchResult { + unreachable!() + } + #[pallet::weight(0)] + pub fn aux_3(_origin: OriginFor, _data: i32, _data2: String) -> DispatchResult { + unreachable!() + } + #[pallet::weight(3)] + pub fn aux_4(_origin: OriginFor) -> DispatchResult { + unreachable!() + } + #[pallet::weight((5, DispatchClass::Operational))] + pub fn operational(_origin: OriginFor) -> DispatchResult { + unreachable!() + } + } + + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct Origin(pub PhantomData); + + #[pallet::event] + pub enum Event { + A, + } + + #[pallet::error] + pub enum Error { + Something, + } + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::storage] + pub type Storage = StorageValue<_, u32>; + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call, + ) -> TransactionValidity { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } +} + +pub type BlockNumber = u64; +pub type Signature = sr25519::Signature; +pub type AccountId = ::Signer; +pub type Header = generic::Header; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type Block = generic::Block; + +#[frame_support::runtime(legacy_ordering)] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(30)] + pub type System = frame_system + Pallet + Call + Event + Origin; + + #[runtime::pallet_index(31)] + pub type Module1_1 = module1; + + #[runtime::pallet_index(32)] + pub type Module2 = module2; + + #[runtime::pallet_index(33)] + pub type Module1_2 = module1; + + #[runtime::pallet_index(34)] + pub type NestedModule3 = nested::module3; + + #[runtime::pallet_index(35)] + #[runtime::disable_unsigned] + pub type Module3 = self::module3; + + #[runtime::pallet_index(6)] + #[runtime::disable_call] + pub type Module1_3 = module1; + + #[runtime::pallet_index(3)] + pub type Module1_4 = module1; + + #[runtime::pallet_index(4)] + #[runtime::disable_call] + pub type Module1_5 = module1; + + #[runtime::pallet_index(1)] + pub type Module1_6 = module1; + + #[runtime::pallet_index(2)] + pub type Module1_7 = module1; + + #[runtime::pallet_index(12)] + pub type Module1_8 = module1; + + #[runtime::pallet_index(13)] + pub type Module1_9 = module1; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type AccountId = AccountId; + type Lookup = sp_runtime::traits::IdentityLookup; + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + type OnSetCode = (); + type Block = Block; + type BlockHashCount = ConstU64<10>; +} + +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; 
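For context on the `#[runtime::disable_unsigned]` marker used in both runtime declarations: the generated `ValidateUnsigned` impl on the runtime forwards to the pallet owning the call and skips pallets whose unsigned validation is disabled, which is what `test_validate_unsigned` at the end of this file pins down. A sketch of the asserted routing, reusing the fixture's own names; this is illustrative, not generated code:

```rust
// Sketch of the dispatch behaviour the test asserts.
use sp_runtime::transaction_validity::{
	InvalidTransaction, TransactionValidityError, UnknownTransaction,
};

fn expected_validity(call: &RuntimeCall) -> TransactionValidityError {
	match call {
		// NestedModule3 implements #[pallet::validate_unsigned] and always
		// answers InvalidTransaction::Call in this fixture.
		RuntimeCall::NestedModule3(..) =>
			TransactionValidityError::Invalid(InvalidTransaction::Call),
		// Module3 is declared with #[runtime::disable_unsigned], so the
		// runtime never consults its validator.
		_ => TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator),
	}
}
```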
+} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl nested::module3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +fn test_pub() -> AccountId { + AccountId::from_raw([0; 32]) +} + +#[test] +fn check_modules_error_type() { + sp_io::TestExternalities::default().execute_with(|| { + assert_eq!( + Module1_1::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 31, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module2::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 32, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_2::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 33, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + NestedModule3::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 34, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_3::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 6, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_4::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 3, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_5::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 4, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_6::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 1, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_7::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 2, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_8::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 12, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_9::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 13, + error: [0; 4], + message: Some("Something") + })), + ); + }) +} + +#[test] +fn integrity_test_works() { + __construct_runtime_integrity_test::runtime_integrity_tests(); + assert_eq!(IntegrityTestExec::get(), 2); +} + +#[test] +fn origin_codec() { + use codec::Encode; + + let origin = OriginCaller::system(frame_system::RawOrigin::None); + assert_eq!(origin.encode()[0], 30); + + let origin = OriginCaller::Module1_1(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 31); + + let origin = OriginCaller::Module2(module2::Origin); + assert_eq!(origin.encode()[0], 32); + + let origin = OriginCaller::Module1_2(module1::Origin(Default::default())); + 
assert_eq!(origin.encode()[0], 33); + + let origin = OriginCaller::NestedModule3(nested::module3::Origin); + assert_eq!(origin.encode()[0], 34); + + let origin = OriginCaller::Module3(module3::Origin(Default::default())); + assert_eq!(origin.encode()[0], 35); + + let origin = OriginCaller::Module1_6(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 1); + + let origin = OriginCaller::Module1_7(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 2); + + let origin = OriginCaller::Module1_8(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 12); + + let origin = OriginCaller::Module1_9(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 13); +} + +#[test] +fn event_codec() { + use codec::Encode; + + let event = + frame_system::Event::::ExtrinsicSuccess { dispatch_info: Default::default() }; + assert_eq!(RuntimeEvent::from(event).encode()[0], 30); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 31); + + let event = module2::Event::A; + assert_eq!(RuntimeEvent::from(event).encode()[0], 32); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 33); + + let event = nested::module3::Event::A; + assert_eq!(RuntimeEvent::from(event).encode()[0], 34); + + let event = module3::Event::A; + assert_eq!(RuntimeEvent::from(event).encode()[0], 35); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 4); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 1); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 2); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 12); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 13); +} + +#[test] +fn call_codec() { + use codec::Encode; + assert_eq!(RuntimeCall::System(frame_system::Call::remark { remark: vec![1] }).encode()[0], 30); + assert_eq!(RuntimeCall::Module1_1(module1::Call::fail {}).encode()[0], 31); + assert_eq!(RuntimeCall::Module2(module2::Call::fail {}).encode()[0], 32); + assert_eq!(RuntimeCall::Module1_2(module1::Call::fail {}).encode()[0], 33); + assert_eq!(RuntimeCall::NestedModule3(nested::module3::Call::fail {}).encode()[0], 34); + assert_eq!(RuntimeCall::Module3(module3::Call::fail {}).encode()[0], 35); + assert_eq!(RuntimeCall::Module1_4(module1::Call::fail {}).encode()[0], 3); + assert_eq!(RuntimeCall::Module1_6(module1::Call::fail {}).encode()[0], 1); + assert_eq!(RuntimeCall::Module1_7(module1::Call::fail {}).encode()[0], 2); + assert_eq!(RuntimeCall::Module1_8(module1::Call::fail {}).encode()[0], 12); + assert_eq!(RuntimeCall::Module1_9(module1::Call::fail {}).encode()[0], 13); +} + +#[test] +fn call_compact_attr() { + use codec::Encode; + let call: module3::Call = module3::Call::aux_1 { data: 1 }; + let encoded = call.encode(); + assert_eq!(2, encoded.len()); + assert_eq!(vec![1, 4], encoded); + + let call: module3::Call = module3::Call::aux_2 { data: 1, data2: 2 }; + let encoded = call.encode(); + assert_eq!(6, encoded.len()); + assert_eq!(vec![2, 1, 0, 0, 0, 8], encoded); +} + +#[test] +fn call_encode_is_correct_and_decode_works() { + use codec::{Decode, Encode}; + let call: module3::Call = module3::Call::fail {}; + let encoded = call.encode(); + assert_eq!(vec![0], encoded); + let decoded = module3::Call::::decode(&mut 
&encoded[..]).unwrap(); + assert_eq!(decoded, call); + + let call: module3::Call = module3::Call::aux_3 { data: 32, data2: "hello".into() }; + let encoded = call.encode(); + assert_eq!(vec![3, 32, 0, 0, 0, 20, 104, 101, 108, 108, 111], encoded); + let decoded = module3::Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); +} + +#[test] +fn call_weight_should_attach_to_call_enum() { + use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, + weights::Weight, + }; + // operational. + assert_eq!( + module3::Call::::operational {}.get_dispatch_info(), + DispatchInfo { + weight: Weight::from_parts(5, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes + }, + ); + // custom basic + assert_eq!( + module3::Call::::aux_4 {}.get_dispatch_info(), + DispatchInfo { + weight: Weight::from_parts(3, 0), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + }, + ); +} + +#[test] +fn call_name() { + use frame_support::traits::GetCallName; + let name = module3::Call::::aux_4 {}.get_call_name(); + assert_eq!("aux_4", name); +} + +#[test] +fn call_metadata() { + use frame_support::traits::{CallMetadata, GetCallMetadata}; + let call = RuntimeCall::Module3(module3::Call::::aux_4 {}); + let metadata = call.get_call_metadata(); + let expected = CallMetadata { function_name: "aux_4".into(), pallet_name: "Module3".into() }; + assert_eq!(metadata, expected); +} + +#[test] +fn get_call_names() { + use frame_support::traits::GetCallName; + let call_names = module3::Call::::get_call_names(); + assert_eq!(["fail", "aux_1", "aux_2", "aux_3", "aux_4", "operational"], call_names); +} + +#[test] +fn get_module_names() { + use frame_support::traits::GetCallMetadata; + let module_names = RuntimeCall::get_module_names(); + assert_eq!( + [ + "System", + "Module1_1", + "Module2", + "Module1_2", + "NestedModule3", + "Module3", + "Module1_4", + "Module1_6", + "Module1_7", + "Module1_8", + "Module1_9", + ], + module_names + ); +} + +#[test] +fn call_subtype_conversion() { + use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; + let call = RuntimeCall::Module3(module3::Call::::fail {}); + let subcall: Option<&CallableCallFor> = call.is_sub_type(); + let subcall_none: Option<&CallableCallFor> = call.is_sub_type(); + assert_eq!(Some(&module3::Call::::fail {}), subcall); + assert_eq!(None, subcall_none); + + let from = RuntimeCall::from(subcall.unwrap().clone()); + assert_eq!(from, call); +} + +#[test] +fn test_metadata() { + use frame_metadata::{ + v14::{StorageEntryType::Plain, *}, + *, + }; + use scale_info::meta_type; + use sp_core::Encode; + use sp_metadata_ir::StorageEntryModifierIR::Optional; + + fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { + if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + doc + } + } + + let pallets = vec![ + PalletMetadata { + name: "System", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![ + PalletConstantMetadata { + name: "BlockWeights", + ty: meta_type::(), + value: BlockWeights::default().encode(), + docs: maybe_docs(vec![" Block & extrinsics weights: base values and limits."]), + }, + PalletConstantMetadata { + name: "BlockLength", + ty: meta_type::(), + value: BlockLength::default().encode(), + docs: maybe_docs(vec![" The maximum length of a block (in bytes)."]), + }, + PalletConstantMetadata { + name: "BlockHashCount", + ty: meta_type::(), + value: 10u64.encode(), + docs: maybe_docs(vec![" Maximum number of block number to block hash 
mappings to keep (oldest pruned first)."]), + }, + PalletConstantMetadata { + name: "DbWeight", + ty: meta_type::(), + value: RuntimeDbWeight::default().encode(), + docs: maybe_docs(vec![" The weight of runtime database operations the runtime can invoke.",]), + }, + PalletConstantMetadata { + name: "Version", + ty: meta_type::(), + value: RuntimeVersion::default().encode(), + docs: maybe_docs(vec![ " Get the chain's in-code version."]), + }, + PalletConstantMetadata { + name: "SS58Prefix", + ty: meta_type::(), + value: 0u16.encode(), + docs: maybe_docs(vec![ + " The designated SS58 prefix of this chain.", + "", + " This replaces the \"ss58Format\" property declared in the chain spec. Reason is", + " that the runtime should know about the prefix in order to make use of it as", + " an identifier of the chain.", + ]), + }, + ], + error: Some(meta_type::>().into()), + index: 30, + }, + PalletMetadata { + name: "Module1_1", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 31, + }, + PalletMetadata { + name: "Module2", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 32, + }, + PalletMetadata { + name: "Module1_2", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 33, + }, + PalletMetadata { + name: "NestedModule3", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 34, + }, + PalletMetadata { + name: "Module3", + storage: Some(PalletStorageMetadata { + prefix: "Module3", + entries: vec![ + StorageEntryMetadata { + name: "Storage", + modifier: Optional.into(), + ty: Plain(meta_type::().into()), + default: vec![0], + docs: vec![], + }, + ] + }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 35, + }, + PalletMetadata { + name: "Module1_3", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 6, + }, + PalletMetadata { + name: "Module1_4", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 3, + }, + PalletMetadata { + name: "Module1_5", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 4, + }, + PalletMetadata { + name: "Module1_6", + storage:None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 1, + }, + PalletMetadata { + name: "Module1_7", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 2, + }, + PalletMetadata { + name: "Module1_8", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 12, + }, + PalletMetadata { + name: "Module1_9", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: 
Some(meta_type::>().into()), + index: 13, + }, + ]; + + let extrinsic = ExtrinsicMetadata { + ty: meta_type::(), + version: 4, + signed_extensions: vec![SignedExtensionMetadata { + identifier: "UnitSignedExtension", + ty: meta_type::<()>(), + additional_signed: meta_type::<()>(), + }], + }; + + let expected_metadata: RuntimeMetadataPrefixed = + RuntimeMetadataLastVersion::new(pallets, extrinsic, meta_type::()).into(); + let actual_metadata = Runtime::metadata(); + + pretty_assertions::assert_eq!(actual_metadata, expected_metadata); +} + +#[test] +fn pallet_in_runtime_is_correct() { + assert_eq!(PalletInfo::index::().unwrap(), 30); + assert_eq!(PalletInfo::name::().unwrap(), "System"); + assert_eq!(PalletInfo::module_name::().unwrap(), "frame_system"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 31); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_1"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 32); + assert_eq!(PalletInfo::name::().unwrap(), "Module2"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module2"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 33); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_2"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 34); + assert_eq!(PalletInfo::name::().unwrap(), "NestedModule3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "nested::module3"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 35); + assert_eq!(PalletInfo::name::().unwrap(), "Module3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "self::module3"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 6); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 3); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_4"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 4); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_5"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 1); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_6"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 2); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_7"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 12); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_8"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 13); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_9"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); +} + +#[test] +fn 
test_validate_unsigned() {
+	use frame_support::pallet_prelude::*;
+
+	let call = RuntimeCall::NestedModule3(nested::module3::Call::fail {});
+	let validity = Runtime::validate_unsigned(TransactionSource::Local, &call).unwrap_err();
+	assert_eq!(validity, TransactionValidityError::Invalid(InvalidTransaction::Call));
+
+	let call = RuntimeCall::Module3(module3::Call::fail {});
+	let validity = Runtime::validate_unsigned(TransactionSource::Local, &call).unwrap_err();
+	assert_eq!(
+		validity,
+		TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)
+	);
+}
diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs
index bb7f7d2822e7cf1a15c41de1ca47686dd8440bfc..40e70b219ba94062af2bb75391628c73bc3a3233 100644
--- a/substrate/frame/support/test/tests/runtime_metadata.rs
+++ b/substrate/frame/support/test/tests/runtime_metadata.rs
@@ -101,7 +101,7 @@ sp_api::impl_runtime_apis! {
 		fn execute_block(_: Block) {
 			unimplemented!()
 		}
-		fn initialize_block(_: &<Block as BlockT>::Header) {
+		fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 			unimplemented!()
 		}
 	}
@@ -200,8 +200,8 @@ fn runtime_metadata() {
 					name: "header",
 					ty: meta_type::<&<Block as BlockT>::Header>(),
 				}],
-				output: meta_type::<()>(),
-				docs: maybe_docs(vec![" Initialize a block with the given header."]),
+				output: meta_type::<sp_runtime::ExtrinsicInclusionMode>(),
+				docs: maybe_docs(vec![" Initialize a block with the given header and return the runtime executive mode."]),
 			},
 		],
 		docs: maybe_docs(vec![
diff --git a/substrate/frame/support/test/tests/runtime_ui.rs b/substrate/frame/support/test/tests/runtime_ui.rs
new file mode 100644
index 0000000000000000000000000000000000000000..dbe150f38ed6a3b6a45ac58e288b65c397c671f9
--- /dev/null
+++ b/substrate/frame/support/test/tests/runtime_ui.rs
@@ -0,0 +1,36 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[rustversion::attr(not(stable), ignore)]
+#[cfg(not(feature = "disable-ui-tests"))]
+#[test]
+fn ui() {
+	// Only run the ui tests when `RUN_UI_TESTS` is set.
+	if std::env::var("RUN_UI_TESTS").is_err() {
+		return
+	}
+
+	// As trybuild is using `cargo check`, we don't need the real WASM binaries.
+	std::env::set_var("SKIP_WASM_BUILD", "1");
+
+	// Deny all warnings since we emit warnings as part of a Runtime's UI.
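The `ui()` harness above is trybuild-driven: every `tests/runtime_ui/*.rs` fixture is compiled via `cargo check` and the compiler output is diffed against a sibling `.stderr` file, while `pass/*.rs` fixtures must compile cleanly. The shape of one compile-fail pair, modeled on the `missing_runtime` fixture added further below:

```rust
// tests/runtime_ui/missing_runtime.rs (fixture body, license header elided):
#[frame_support::runtime]
mod runtime {}

fn main() {}

// tests/runtime_ui/missing_runtime.stderr (the expected macro diagnostic):
// error: Missing Runtime. Please add a struct inside the module and
//        annotate it with `#[runtime::runtime]`
```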
+ std::env::set_var("RUSTFLAGS", "--deny warnings"); + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/runtime_ui/*.rs"); + t.pass("tests/runtime_ui/pass/*.rs"); +} diff --git a/substrate/frame/contracts/fixtures/contracts/invalid_contract_no_call.rs b/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.rs similarity index 80% rename from substrate/frame/contracts/fixtures/contracts/invalid_contract_no_call.rs rename to substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.rs index 13af3eb22b1f5fd10c3e2b60061d3f7a6cc37c94..eb5868090418a3e10320cc4394c565d054926710 100644 --- a/substrate/frame/contracts/fixtures/contracts/invalid_contract_no_call.rs +++ b/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.rs @@ -14,12 +14,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -//! Valid module but missing the call function -#![no_std] -#![no_main] -extern crate common; +#[frame_support::runtime] +fn construct_runtime() {} -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.stderr b/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.stderr new file mode 100644 index 0000000000000000000000000000000000000000..7c2f0d1f1699502c8847f308e7978d988552cc47 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.stderr @@ -0,0 +1,5 @@ +error: expected `mod` + --> tests/runtime_ui/can_only_be_attached_to_mod.rs:19:1 + | +19 | fn construct_runtime() {} + | ^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.rs similarity index 58% rename from substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs rename to substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.rs index 55dd315fb297cdf55802e25f48b53364b394dd3d..648812b52c4fe8b307b93a4e702b1ff98547e613 100644 --- a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs +++ b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.rs @@ -17,26 +17,27 @@ #[frame_support::pallet] mod pallet { - use frame_support::pallet_prelude::Hooks; - use frame_system::pallet_prelude::BlockNumberFor; - use frame_support::pallet_prelude::StorageValue; + #[pallet::config] + pub trait Config: frame_system::Config {} - #[pallet::config] - pub trait Config: frame_system::Config {} + #[pallet::pallet] + pub struct Pallet(_); - #[pallet::pallet] - #[pallet::generate_store(pub trait Store)] - pub struct Pallet(core::marker::PhantomData); + #[pallet::call] + impl Pallet {} +} - #[pallet::hooks] - impl Hooks> for Pallet {} +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; - #[pallet::call] - impl Pallet {} + #[runtime::pallet_index(0)] + pub type System = frame_system; - #[pallet::storage] - type Foo = StorageValue<_, u8>; + #[runtime::pallet_index(0)] + pub type Pallet = pallet; } -fn main() { -} +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.stderr b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.stderr new file mode 100644 index 
0000000000000000000000000000000000000000..8963c5d3f265d1efb6be5fb1b820285f86d4245b --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.stderr @@ -0,0 +1,11 @@ +error: Pallet indices are conflicting: Both pallets System and Pallet are at index 0 + --> tests/runtime_ui/conflicting_pallet_index.rs:37:14 + | +37 | pub type System = frame_system; + | ^^^^^^ + +error: Pallet indices are conflicting: Both pallets System and Pallet are at index 0 + --> tests/runtime_ui/conflicting_pallet_index.rs:40:14 + | +40 | pub type Pallet = pallet; + | ^^^^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.rs b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.rs new file mode 100644 index 0000000000000000000000000000000000000000..c68f9a8082b47fcd3bf306187c8f511392baf95b --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet {} +} + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + + #[runtime::pallet_index(1)] + pub type System = pallet; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.stderr b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.stderr new file mode 100644 index 0000000000000000000000000000000000000000..c250c24e3e704136e380f91b0e74eb3c4fd4d7b7 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.stderr @@ -0,0 +1,11 @@ +error: Two pallets with the same name! + --> tests/runtime_ui/conflicting_pallet_name.rs:37:14 + | +37 | pub type System = frame_system; + | ^^^^^^ + +error: Two pallets with the same name! + --> tests/runtime_ui/conflicting_pallet_name.rs:40:14 + | +40 | pub type System = pallet; + | ^^^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.rs b/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.rs similarity index 76% rename from substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.rs rename to substrate/frame/support/test/tests/runtime_ui/invalid_attribute.rs index 72ad7896dfec572014cb6b10dcc39bc417a275b6..5df2491cb24ba15000324efdb853e7e38af1b774 100644 --- a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.rs +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.rs @@ -15,14 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
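The `invalid_attribute` fixture below pins the only accepted spellings of the macro call: bare, or carrying `legacy_ordering`. As a sketch (mirroring the minimal `pass/basic.rs` fixture later in this patch), the same runtime opted into `construct_runtime!`-style declaration ordering rather than the default pallet-index ordering:

```rust
// Sketch: `legacy_ordering` keeps pallets in declaration order (as
// construct_runtime! did); without it, listings such as metadata and
// module names follow the pallet indices, as the reordered test
// fixtures earlier in this patch suggest.
#[frame_support::runtime(legacy_ordering)]
mod runtime {
	#[runtime::runtime]
	#[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeOrigin, RuntimeError)]
	pub struct Runtime;

	#[runtime::pallet_index(0)]
	pub type System = frame_system;
}
```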
-#[frame_support::pallet] -mod pallet { - #[pallet::config] - pub trait Config: frame_system::Config {} +#[frame_support::runtime(dummy)] +mod runtime { - #[pallet::pallet] - #[pallet::generate_store(trait Store)] - pub struct Pallet(core::marker::PhantomData); } fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.stderr b/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e13552413c00a094bf37cb54f0f21db4e0030231 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.stderr @@ -0,0 +1,5 @@ +error: Invalid runtime macro call: unexpected attribute. Macro call must be bare, such as `#[frame_support::runtime]` or `#[runtime]`, or must specify the `legacy_ordering` attribute, such as `#[frame_support::runtime(legacy_ordering)]` or #[runtime(legacy_ordering)]. + --> tests/runtime_ui/invalid_attribute.rs:18:26 + | +18 | #[frame_support::runtime(dummy)] + | ^^^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.rs b/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.rs new file mode 100644 index 0000000000000000000000000000000000000000..de89bb60224fd8fd77ac7c68f62353c6e51aafe6 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.rs @@ -0,0 +1,28 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; + + #[runtime::pallet_index("0")] + pub type System = frame_system; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.stderr b/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1fbd086ddbe7e243dff03df1a7e91dc9d911964d --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.stderr @@ -0,0 +1,5 @@ +error: expected integer literal + --> tests/runtime_ui/invalid_pallet_index.rs:24:29 + | +24 | #[runtime::pallet_index("0")] + | ^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.rs b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..89ba2f23bb1afbde0ff2936acd7910b2b761fff7 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.rs @@ -0,0 +1,25 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeInfo)] + pub struct Runtime; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0b128c3dd4579ba93f7950f42f0ff97440eadd38 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr @@ -0,0 +1,5 @@ +error: expected one of: `RuntimeCall`, `RuntimeEvent`, `RuntimeError`, `RuntimeOrigin`, `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeSlashReason`, `RuntimeLockId`, `RuntimeTask` + --> tests/runtime_ui/invalid_runtime_type_derive.rs:21:23 + | +21 | #[runtime::derive(RuntimeInfo)] + | ^^^^^^^^^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_runtime.rs b/substrate/frame/support/test/tests/runtime_ui/missing_runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..c5febbe52876008e8a0537fece2b85806d6b323d --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_runtime.rs @@ -0,0 +1,21 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime {} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_runtime.stderr b/substrate/frame/support/test/tests/runtime_ui/missing_runtime.stderr new file mode 100644 index 0000000000000000000000000000000000000000..9790c5dbc1a9a3f16269c4adea0b2c558bf13c08 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_runtime.stderr @@ -0,0 +1,5 @@ +error: Missing Runtime. Please add a struct inside the module and annotate it with `#[runtime::runtime]` + --> tests/runtime_ui/missing_runtime.rs:19:1 + | +19 | mod runtime {} + | ^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.rs b/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..c3fb0fd454f023fda6b442a5411087caea172c17 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + pub struct Runtime; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.stderr b/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1a8deebe5497c9c463f52140b8893b9c12529e52 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.stderr @@ -0,0 +1,5 @@ +error: Missing Runtime Types. Please annotate the runtime struct with `#[runtime::derive]` + --> tests/runtime_ui/missing_runtime_types_derive.rs:19:1 + | +19 | mod runtime { + | ^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.rs b/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.rs new file mode 100644 index 0000000000000000000000000000000000000000..f9a15fcf992a459313495562316cfa7c0ff01fde --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.rs @@ -0,0 +1,25 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.stderr b/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.stderr new file mode 100644 index 0000000000000000000000000000000000000000..dc3ac4cd5d560d208da05556f256498fd838da37 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.stderr @@ -0,0 +1,5 @@ +error: `System` pallet declaration is missing. Please add this line: `pub type System = frame_system;` + --> tests/runtime_ui/missing_system_pallet.rs:19:5 + | +19 | mod runtime { + | ^^^^^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs b/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs new file mode 100644 index 0000000000000000000000000000000000000000..514f150180153692caf55ba9b3ecb171ca4e1a2a --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::derive_impl; + +pub type Block = frame_system::mocking::MockBlock<Runtime>; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeOrigin, RuntimeError, RuntimeTask)] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/runtime_struct.rs b/substrate/frame/support/test/tests/runtime_ui/runtime_struct.rs new file mode 100644 index 0000000000000000000000000000000000000000..bfa2fb75f04b8ce04d5aabdc83e36414f2dd3c00 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/runtime_struct.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + pub enum Runtime {} +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/runtime_struct.stderr b/substrate/frame/support/test/tests/runtime_ui/runtime_struct.stderr new file mode 100644 index 0000000000000000000000000000000000000000..04192bcf7f192c833893f8a57536961fcd9b32ca --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/runtime_struct.stderr @@ -0,0 +1,5 @@ +error: Invalid runtime::runtime, expected struct definition + --> tests/runtime_ui/runtime_struct.rs:21:5 + | +21 | pub enum Runtime {} + | ^^^ diff --git a/substrate/frame/support/test/tests/storage_transaction.rs b/substrate/frame/support/test/tests/storage_transaction.rs index c47743308609850ae480f3194773a76c36bbba58..f1489a8b0a6b3d9696249bc006d5fd10c13e36c9 100644 --- a/substrate/frame/support/test/tests/storage_transaction.rs +++ b/substrate/frame/support/test/tests/storage_transaction.rs @@ -41,7 +41,6 @@ pub mod pallet { use frame_system::pallet_prelude::*; #[pallet::pallet] - #[pallet::generate_store(pub (super) trait Store)] pub struct Pallet(_); #[pallet::config] diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index b852aa99b97ad9e149ad5883d136a0fe7d8923b4..416969e9c4776aa161dde65281b7847156bdbf56 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -20,7 +20,7 @@ cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } frame-support = { path = "../support", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index 18bfb85f52dfd4c2b6a5c8a57e78494d97fbdeeb..29100faa75142a8b945bf15f1870e9c3d685d35e 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -21,7 +21,7 @@ #![cfg(feature = "runtime-benchmarks")] use codec::Encode; -use frame_benchmarking::{impl_benchmark_test_suite, v2::*}; +use frame_benchmarking::v2::*; use frame_support::{dispatch::DispatchClass, storage, traits::Get}; use frame_system::{Call, Pallet as System, RawOrigin}; use sp_core::storage::well_known_keys; diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 069217bcee46b4663c836a750bfd90a568c1a4d2..184f27b61ed2a78fa298e4883db6d4eb39d8a184 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -130,11 +130,13 @@ use frame_support::{ DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, PostDispatchInfo, }, ensure, impl_ensure_origin_with_arg_ignoring_arg, + migrations::MultiStepMigrator, pallet_prelude::Pays, storage::{self, StorageStreamIter}, traits::{ ConstU32, Contains, EnsureOrigin, EnsureOriginWithArg, Get, HandleLifetime, - OnKilledAccount, OnNewAccount, OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, + OnKilledAccount, OnNewAccount, OnRuntimeUpgrade, OriginTrait, PalletInfo, SortedMembers, + StoredMap, TypedGet, }, Parameter, }; @@ -148,6 +150,7 
@@ use sp_io::TestExternalities; pub mod limits; #[cfg(test)] pub(crate) mod mock; + pub mod offchain; mod extensions; @@ -168,6 +171,7 @@ pub use extensions::{ // Backward compatible re-export. pub use extensions::check_mortality::CheckMortality as CheckEra; pub use frame_support::dispatch::RawOrigin; +use frame_support::traits::{PostInherents, PostTransactions, PreInherents}; pub use weights::WeightInfo; const LOG_TARGET: &str = "runtime::system"; @@ -298,9 +302,14 @@ pub mod pallet { type BaseCallFilter = frame_support::traits::Everything; type BlockHashCount = frame_support::traits::ConstU64<10>; type OnSetCode = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); } - /// Default configurations of this pallet in a solo-chain environment. + /// Default configurations of this pallet in a solochain environment. /// /// ## Considerations: /// @@ -392,6 +401,11 @@ pub mod pallet { /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); } /// Default configurations of this pallet in a relay-chain environment. @@ -451,6 +465,7 @@ pub mod pallet { + Clone + OriginTrait; + #[docify::export(system_runtime_call)] /// The aggregated `RuntimeCall` type. #[pallet::no_default_bounds] type RuntimeCall: Parameter @@ -523,7 +538,7 @@ pub mod pallet { #[pallet::constant] type DbWeight: Get<RuntimeDbWeight>; - /// Get the chain's current version. + /// Get the chain's in-code version. #[pallet::constant] type Version: Get<RuntimeVersion>; @@ -570,6 +585,35 @@ pub mod pallet { /// The maximum number of consumers allowed on a single account. type MaxConsumers: ConsumerLimits; + + /// All migrations that should run in the next runtime upgrade. + /// + /// These were formerly configured in `Executive`. Parachains need to ensure that + /// running all these migrations in one block will not overflow the weight limit of a block. + /// The migrations are run *before* the pallet `on_runtime_upgrade` hooks, just like the + /// `OnRuntimeUpgrade` migrations. + type SingleBlockMigrations: OnRuntimeUpgrade; + + /// The migrator that is used to run Multi-Block-Migrations. + /// + /// Can be set to [`pallet-migrations`] or an alternative implementation of the interface. + /// The diagram in `frame_executive::block_flowchart` explains when it runs. + type MultiBlockMigrator: MultiStepMigrator; + + /// A callback that executes in *every block* directly before all inherents are applied. + /// + /// See `frame_executive::block_flowchart` for an in-depth explanation of when it runs. + type PreInherents: PreInherents; + + /// A callback that executes in *every block* directly after all inherents are applied. + /// + /// See `frame_executive::block_flowchart` for an in-depth explanation of when it runs. + type PostInherents: PostInherents; + + /// A callback that executes in *every block* directly after all transactions are applied. + /// + /// See `frame_executive::block_flowchart` for an in-depth explanation of when it runs. + type PostTransactions: PostTransactions; } #[pallet::pallet] @@ -617,6 +661,9 @@ pub mod pallet { } /// Set the new runtime code without doing any checks of the given `code`. + /// + /// Note that runtime upgrades will not run if this is called with a spec version that + /// does not increase! 
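Stepping back from the diff for a moment: the five new `frame_system::Config` items above all default to `()`, so existing runtimes keep compiling unchanged, and a chain only overrides the ones it needs. As a rough, hedged sketch of how a chain could opt in, here is a hypothetical `PreInherents` hook; the `BlockLogger` type and its log message are invented for this note, and the exact method signature should be checked against `frame_support::traits`:

```rust
use frame_support::traits::PreInherents;

/// Hypothetical hook: runs in every block, directly before inherents are
/// applied, as described by the `PreInherents` docs above.
pub struct BlockLogger;

impl PreInherents for BlockLogger {
    fn pre_inherents() {
        log::debug!(target: "runtime::system", "about to apply inherents");
    }
}
```

Wiring it up would then be `type PreInherents = BlockLogger;` in the runtime's `frame_system::Config` impl, with `SingleBlockMigrations`, `MultiBlockMigrator`, `PostInherents`, and `PostTransactions` left as `()` unless the chain needs them.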
#[pallet::call_index(3)] #[pallet::weight((T::SystemWeightInfo::set_code(), DispatchClass::Operational))] pub fn set_code_without_checks( @@ -812,6 +859,8 @@ pub mod pallet { NonZeroRefCount, /// The origin filter prevent the call to be dispatched. CallFiltered, + /// A multi-block migration is ongoing and prevents the current code from being replaced. + MultiBlockMigrationsOngoing, #[cfg(feature = "experimental")] /// The specified [`Task`] is not valid. InvalidTask, @@ -843,11 +892,15 @@ pub mod pallet { #[pallet::storage] pub(super) type ExtrinsicCount<T: Config> = StorageValue<_, u32>; + /// Whether all inherents have been applied. + #[pallet::storage] + pub type InherentsApplied<T: Config> = StorageValue<_, bool, ValueQuery>; + /// The current weight for the block. #[pallet::storage] #[pallet::whitelist_storage] #[pallet::getter(fn block_weight)] - pub(super) type BlockWeight<T: Config> = StorageValue<_, ConsumedWeight, ValueQuery>; + pub type BlockWeight<T: Config> = StorageValue<_, ConsumedWeight, ValueQuery>; /// Total length (in bytes) for all extrinsics put together, for the current block. #[pallet::storage] @@ -892,6 +945,7 @@ pub mod pallet { /// just in case someone still reads them from within the runtime. #[pallet::storage] #[pallet::whitelist_storage] + #[pallet::disable_try_decode_storage] #[pallet::unbounded] pub(super) type Events<T: Config> = StorageValue<_, Vec<Box<EventRecord<T::RuntimeEvent, T::Hash>>>, ValueQuery>; @@ -1370,6 +1424,19 @@ impl<T: Config> Pallet<T> { Self::deposit_event(Event::CodeUpdated); } + /// Whether all inherents have been applied. + pub fn inherents_applied() -> bool { + InherentsApplied::<T>::get() + } + + /// Note that all inherents have been applied. + /// + /// Should be called immediately after all inherents have been applied. Must be called at least + /// once per block. + pub fn note_inherents_applied() { + InherentsApplied::<T>::put(true); + } + /// Increment the reference counter on an account. #[deprecated = "Use `inc_consumers` instead"] pub fn inc_ref(who: &T::AccountId) { @@ -1689,6 +1756,7 @@ impl<T: Config> Pallet<T> { <Digest<T>>::put(digest); <ParentHash<T>>::put(parent_hash); <BlockHash<T>>::insert(*number - One::one(), parent_hash); + <InherentsApplied<T>>::kill(); // Remove previous block data from storage BlockWeight::<T>::kill(); @@ -1735,6 +1803,7 @@ impl<T: Config> Pallet<T> { ExecutionPhase::<T>::kill(); AllExtrinsicsLen::<T>::kill(); storage::unhashed::kill(well_known_keys::INTRABLOCK_ENTROPY); + InherentsApplied::<T>::kill(); // The following fields // @@ -1978,10 +2047,14 @@ impl<T: Config> Pallet<T> { /// Determine whether or not it is possible to update the code. /// - /// Checks the given code if it is a valid runtime wasm blob by instantianting + /// Checks that the given code is a valid runtime wasm blob by instantiating /// it and extracting the runtime version of it. It checks that the runtime version /// of the old and new runtime has the same spec name and that the spec version is increasing. 
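`InherentsApplied` is bookkeeping for whoever drives block execution: the inherent phase is marked as finished exactly once per block, and only afterwards may a multi-block migration take a step. The real call sites live in `frame-executive`; the following is only an illustration of the intended ordering, generic over any `T: frame_system::Config`:

```rust
use frame_support::{migrations::MultiStepMigrator, weights::Weight};

/// Illustrative only: what a block executor does once all inherents ran.
fn after_inherents<T: frame_system::Config>() -> Weight {
    // Record that the inherent phase is over (at least once per block).
    frame_system::Pallet::<T>::note_inherents_applied();
    debug_assert!(frame_system::Pallet::<T>::inherents_applied());

    // Only now may an ongoing multi-block migration take its next step.
    if <T as frame_system::Config>::MultiBlockMigrator::ongoing() {
        <T as frame_system::Config>::MultiBlockMigrator::step()
    } else {
        Weight::zero()
    }
}
```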
pub fn can_set_code(code: &[u8]) -> Result<(), sp_runtime::DispatchError> { + if T::MultiBlockMigrator::ongoing() { + return Err(Error::<T>::MultiBlockMigrationsOngoing.into()) + } + let current_version = T::Version::get(); let new_version = sp_io::misc::runtime_version(code) .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) diff --git a/substrate/frame/system/src/mock.rs b/substrate/frame/system/src/mock.rs index c4108099e39ffdf306d7a9e5e0af8a619336eaea..7059845a7df33104a66348803fee6396bf2f93d2 100644 --- a/substrate/frame/system/src/mock.rs +++ b/substrate/frame/system/src/mock.rs @@ -86,6 +86,22 @@ impl Config for Test { type Version = Version; type AccountData = u32; type OnKilledAccount = RecordKilled; + type MultiBlockMigrator = MockedMigrator; +} + +parameter_types! { + pub static Ongoing: bool = false; +} + +pub struct MockedMigrator; +impl frame_support::migrations::MultiStepMigrator for MockedMigrator { + fn ongoing() -> bool { + Ongoing::get() + } + + fn step() -> Weight { + Weight::zero() + } } pub type SysEvent = frame_system::Event<Test>; diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index e437e7f9f39b0845d9f7eef33e3c90546c8dbbd2..b889b5ca046efe557e7706870c11febce8a358b2 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -675,6 +675,28 @@ fn set_code_with_real_wasm_blob() { }); } +#[test] +fn set_code_rejects_during_mbm() { + Ongoing::set(true); + + let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); + ext.execute_with(|| { + System::set_block_number(1); + let res = System::set_code( + RawOrigin::Root.into(), + substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(), + ); + assert_eq!( + res, + Err(DispatchErrorWithPostInfo::from(Error::<Test>::MultiBlockMigrationsOngoing)) + ); + + assert!(System::events().is_empty()); + }); +} + #[test] fn set_code_via_authorization_works() { let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index ae308cf67f01f23c5485fee825221f08d4ee458d..7339cf0a8cce86ec15aebe6a55115f10360b1a7f 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/tips/src/lib.rs b/substrate/frame/tips/src/lib.rs index 4c7cfc3028a9f04c87022ffc89892b59ce3c5610..8c360fb57d72488553529ab4264eb7f8b444c064 100644 --- a/substrate/frame/tips/src/lib.rs +++ b/substrate/frame/tips/src/lib.rs @@ -122,7 +122,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 56c1cc4caa7dd597e8f291a58cb3c295e221c63c..275e1c01f927217726907f3e6045fc0b0d3681e5 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } @@ -29,7 +29,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } pallet-balances = { path = "../balances" } [features] diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 9a80081e04f655d12f058f76bf2e37fd82109cbf..1da3237df0817c989899b9180af984da9766aa81 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -30,10 +30,10 @@ frame-benchmarking = { path = "../../benchmarking", default-features = false, op # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } sp-storage = { path = "../../../primitives/storage", default-features = false } diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index 59b6e578571fcb29da1acc444415c587540d04a8..1386d9b5a5691475bdc06a198a86cb944b7e8305 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index edc68b386bcaa63aa9ca35e6594307a8ef13a303..5f90904123d9341425ff707ae7682e2f20f28ecf 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = docify = "0.2.7" 
impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index 3330d2a88de941896b2be432d404caa565a266eb..f4b1d13c520399a9d5960bf90958fdd5b154d10a 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -27,7 +27,7 @@ sp-version = { path = "../version", default-features = false } sp-state-machine = { path = "../state-machine", default-features = false, optional = true } sp-trie = { path = "../trie", default-features = false, optional = true } hash-db = { version = "0.16.0", optional = true } -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } scale-info = { version = "2.10.0", default-features = false, features = [ "derive", ] } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index f0644eea8f45fa6b098c7adca82a08821700d047..f5406758687ae20e9ce13a2cf02959c5cccbd576 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -19,8 +19,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.28" -syn = { version = "2.0.49", features = ["extra-traits", "fold", "full", "visit"] } +quote = { workspace = true } +syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "3.0.0" diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index 2b1e65ec88524a0236a464005fb2d1215c1157fe..e34e4c0e7672164afb41702ab3bae87082df9062 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -456,6 +456,7 @@ impl<'a> ToClientSideDecl<'a> { |err| #crate_::ApiError::FailedToDecodeReturnValue { function: #function_name, error: err, + raw: r.clone(), } ) ) diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index c1339ff6621b389e32c2f9ca2a7f19079c2d269e..1761e0ac9dbf450c15adae2659119c918f300334 100644 --- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -158,7 +158,7 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result<TokenStream> { fn initialize_block( _: <#block_type as #crate_::BlockT>::Hash, _: &<#block_type as #crate_::BlockT>::Header, - ) -> std::result::Result<(), #crate_::ApiError> { + ) -> std::result::Result<#crate_::__private::ExtrinsicInclusionMode, #crate_::ApiError> { unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") } } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 190de1ab3fdee0611f27071b9c769a164c3aca9d..a945b9f21f3cff60d6a9d5e24aa07704a1dc8d31 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs 
@@ -101,7 +101,7 @@ pub mod __private { generic::BlockId, traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, transaction_validity::TransactionValidity, - RuntimeString, TransactionOutcome, + ExtrinsicInclusionMode, RuntimeString, TransactionOutcome, }; pub use sp_std::{mem, slice, vec}; pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; @@ -115,11 +115,11 @@ pub use sp_core::traits::CallContext; use sp_core::OpaqueMetadata; #[cfg(feature = "std")] use sp_externalities::{Extension, Extensions}; -use sp_runtime::traits::Block as BlockT; #[cfg(feature = "std")] use sp_runtime::traits::HashingFor; #[cfg(feature = "std")] pub use sp_runtime::TransactionOutcome; +use sp_runtime::{traits::Block as BlockT, ExtrinsicInclusionMode}; #[cfg(feature = "std")] pub use sp_state_machine::StorageProof; #[cfg(feature = "std")] @@ -280,7 +280,7 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// ```rust /// use sp_version::create_runtime_str; /// # -/// # use sp_runtime::traits::Block as BlockT; +/// # use sp_runtime::{ExtrinsicInclusionMode, traits::Block as BlockT}; /// # use sp_test_primitives::Block; /// # /// # /// The declaration of the `Runtime` type is done by the `construct_runtime!` macro @@ -307,7 +309,9 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// # unimplemented!() /// # } /// # fn execute_block(_block: Block) {} -/// # fn initialize_block(_header: &<Block as BlockT>::Header) {} +/// # fn initialize_block(_header: &<Block as BlockT>::Header) -> ExtrinsicInclusionMode { +/// # unimplemented!() +/// # } /// # } /// /// impl self::Balance for Runtime { @@ -540,11 +542,12 @@ pub fn init_runtime_logger() { #[cfg(feature = "std")] #[derive(Debug, thiserror::Error)] pub enum ApiError { - #[error("Failed to decode return value of {function}")] + #[error("Failed to decode return value of {function}: {error}, raw data: {raw:?}")] FailedToDecodeReturnValue { function: &'static str, #[source] error: codec::Error, + raw: Vec<u8>, }, #[error("Failed to convert return value from runtime to node of {function}")] FailedToConvertReturnValue { @@ -800,15 +803,18 @@ pub fn deserialize_runtime_api_info(bytes: [u8; RUNTIME_API_INFO_SIZE]) -> ([u8; decl_runtime_apis! { /// The `Core` runtime api that every Substrate runtime needs to implement. #[core_trait] - #[api_version(4)] + #[api_version(5)] pub trait Core { /// Returns the version of the runtime. fn version() -> RuntimeVersion; /// Execute the given block. fn execute_block(block: Block); /// Initialize a block with the given header. + #[changed_in(5)] #[renamed("initialise_block", 2)] fn initialize_block(header: &<Block as BlockT>::Header); + /// Initialize a block with the given header and return the runtime executive mode. + fn initialize_block(header: &<Block as BlockT>::Header) -> ExtrinsicInclusionMode; } /// The `Metadata` api trait that returns metadata for the runtime. diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index d68470551d20061aa09d277a9999db81654ff47d..211a08561fd4bcf601d9508d96447fc07f82657f 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -139,7 +139,7 @@ impl_runtime_apis!
{ fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &<Block as BlockT>::Header) { + fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs index 43718e4cd04a3f25fcc805edec3c14283f1f5f56..262a874213a5662c0c87a79989b620c661e707c7 100644 --- a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs +++ b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs @@ -40,7 +40,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &<Block as BlockT>::Header) { + fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/impl_missing_version.rs b/substrate/primitives/api/test/tests/ui/impl_missing_version.rs index 560257b5168c921ce112a56fcbd7e9c303363c77..58850ab343fbb7bcc2c04b01c7aec9cb881023d0 100644 --- a/substrate/primitives/api/test/tests/ui/impl_missing_version.rs +++ b/substrate/primitives/api/test/tests/ui/impl_missing_version.rs @@ -45,7 +45,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &<Block as BlockT>::Header) { + fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs b/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs index 6ead545f85a11e8f0382331e77145acb4a053d5e..70f75d065154afcb807e1712c95fc24eca5267b1 100644 --- a/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs +++ b/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs @@ -44,7 +44,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &<Block as BlockT>::Header) { + fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs b/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs index 8eebc1d79babcbd6fb95876a530a920397c035a6..63032000040b7d19f8e31fc4ac3221369aef9e7e 100644 --- a/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs +++ b/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs @@ -47,7 +47,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &<Block as BlockT>::Header) { + fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs b/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs index 594556d57be50dbd151de486dc22a63a733b933b..0858813bc99941be005e0c0141b2defc3a96a3d3 100644 --- a/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs +++ b/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs @@ -51,7 +51,7 @@ sp_api::impl_runtime_apis!
{ fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &<Block as BlockT>::Header) { + fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs b/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs index ae573238ffe144d26b7e5dd4528e8a9e5ab88a9e..3e0cb79156c8bf5c218e29e3024d181a69fc4da6 100644 --- a/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs +++ b/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs @@ -46,7 +46,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &<Block as BlockT>::Header) { + fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs index 921bf0d04351dac4ddcdd18030619f1ebcf15008..b2caea7ab7e44d910628d0ac834017e66e7e5d2e 100644 --- a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs +++ b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs @@ -42,7 +42,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &<Block as BlockT>::Header) { + fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index cf57aa4b76d23609ea4e4d3a8f9fb8b436bda07d..6f90a2b6262e50d7edd23e415924a8259f0a0d96 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -21,7 +21,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-core = { path = "../core", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, optional = true, features = ["alloc", "derive"] } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-std = { path = "../std", default-features = false } sp-io = { path = "../io", default-features = false } diff --git a/substrate/primitives/application-crypto/check-features-variants.sh b/substrate/primitives/application-crypto/check-features-variants.sh new file mode 100755 index 0000000000000000000000000000000000000000..dd45a212bae097beb40171295e254a601674ff00 --- /dev/null +++ b/substrate/primitives/application-crypto/check-features-variants.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env -S bash -eux + +export RUSTFLAGS="-Cdebug-assertions=y -Dwarnings" +T=wasm32-unknown-unknown +cargo check --release +cargo check --release --target=$T --no-default-features +cargo check --release --target=$T --no-default-features --features="full_crypto" +cargo check --release --target=$T --no-default-features --features="serde" +cargo check --release --target=$T --no-default-features --features="serde,full_crypto" +cargo check --release --target=$T --no-default-features --features="bandersnatch-experimental" +cargo check --release --target=$T --no-default-features --features="bls-experimental" +cargo check --release --target=$T --no-default-features --features="bls-experimental,full_crypto" diff --git 
a/substrate/primitives/application-crypto/src/bls377.rs b/substrate/primitives/application-crypto/src/bls377.rs index ee17060564fa8bec54888c1123b7c86af13863d3..3bd01de139c9496d35b143ddeb07de1285154946 100644 --- a/substrate/primitives/application-crypto/src/bls377.rs +++ b/substrate/primitives/application-crypto/src/bls377.rs @@ -19,14 +19,13 @@ use crate::{KeyTypeId, RuntimePublic}; pub use sp_core::bls::bls377::*; +use sp_std::vec::Vec; mod app { crate::app_crypto!(super, sp_core::testing::BLS377); } -#[cfg(feature = "full_crypto")] -pub use app::Pair as AppPair; -pub use app::{Public as AppPublic, Signature as AppSignature}; +pub use app::{Pair as AppPair, Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/ecdsa.rs b/substrate/primitives/application-crypto/src/ecdsa.rs index 27ffe12579f5591cf71a0f22cd5bb2702ce83afb..439b51dc604509788bd16b25aea2dbf6201471bd 100644 --- a/substrate/primitives/application-crypto/src/ecdsa.rs +++ b/substrate/primitives/application-crypto/src/ecdsa.rs @@ -27,9 +27,7 @@ mod app { crate::app_crypto!(super, sp_core::testing::ECDSA); } -#[cfg(feature = "full_crypto")] -pub use app::Pair as AppPair; -pub use app::{Public as AppPublic, Signature as AppSignature}; +pub use app::{Pair as AppPair, Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/ecdsa_bls377.rs b/substrate/primitives/application-crypto/src/ecdsa_bls377.rs index 70940587cedaf6312dbc75834d2180131ca157b4..8dee73095fb2e2fcbeb968edef2aad667112767a 100644 --- a/substrate/primitives/application-crypto/src/ecdsa_bls377.rs +++ b/substrate/primitives/application-crypto/src/ecdsa_bls377.rs @@ -18,6 +18,7 @@ //! ECDSA and BLS12-377 paired crypto applications. 
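The pattern repeated across these application-crypto modules (`bls377`, `ecdsa`, and the others below) is the user-visible half of the refactor: `AppPair` was previously exported only under `full_crypto` and is now always available. A small sketch of what a non-`full_crypto` build can now express, using sr25519 as the example (seed bytes invented for illustration):

```rust
use sp_application_crypto::sr25519::{AppPair, AppPublic};
use sp_application_crypto::Pair as _; // the re-exported `sp_core` trait

fn derive_public() -> AppPublic {
    // Constructing a pair from seed material no longer requires the
    // `full_crypto` feature; per the macro changes below, only `sign`
    // stays behind that gate.
    let pair = AppPair::from_seed_slice(&[0u8; 32]).expect("32-byte seed is valid");
    pair.public()
}
```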
use crate::{KeyTypeId, RuntimePublic}; +use sp_std::vec::Vec; pub use sp_core::paired_crypto::ecdsa_bls377::*; diff --git a/substrate/primitives/application-crypto/src/ed25519.rs b/substrate/primitives/application-crypto/src/ed25519.rs index bc05018370edb16c41ed1dddef2dafd7d785cc32..addefe7daf6431415f1c06727b88edd1b3bd9158 100644 --- a/substrate/primitives/application-crypto/src/ed25519.rs +++ b/substrate/primitives/application-crypto/src/ed25519.rs @@ -27,9 +27,7 @@ mod app { crate::app_crypto!(super, sp_core::testing::ED25519); } -#[cfg(feature = "full_crypto")] -pub use app::Pair as AppPair; -pub use app::{Public as AppPublic, Signature as AppSignature}; +pub use app::{Pair as AppPair, Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index 686b486f335301e8750253cb2e306a0f5cb222d4..ea2e5a83127bb50a8060e22d81660c80e9e18ee2 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -20,12 +20,9 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -pub use sp_core::crypto::{key_types, CryptoTypeId, KeyTypeId}; +pub use sp_core::crypto::{key_types, CryptoTypeId, DeriveJunction, KeyTypeId, Ss58Codec}; #[doc(hidden)] -#[cfg(feature = "full_crypto")] pub use sp_core::crypto::{DeriveError, Pair, SecretStringError}; -#[cfg(any(feature = "full_crypto", feature = "serde"))] -pub use sp_core::crypto::{DeriveJunction, Ss58Codec}; #[doc(hidden)] pub use sp_core::{ self, @@ -85,7 +82,7 @@ macro_rules! app_crypto { $module::CRYPTO_ID ); $crate::app_crypto_signature_common!($module::Signature, $key_type); - $crate::app_crypto_pair!($module::Pair, $key_type, $module::CRYPTO_ID); + $crate::app_crypto_pair_common!($module::Pair, $key_type, $module::CRYPTO_ID); }; } @@ -116,13 +113,15 @@ macro_rules! app_crypto { $module::CRYPTO_ID ); $crate::app_crypto_signature_common!($module::Signature, $key_type); + $crate::app_crypto_pair_common!($module::Pair, $key_type, $module::CRYPTO_ID); }; } /// Declares `Pair` type which is functionally equivalent to `$pair`, but is /// new application-specific type whose identifier is `$key_type`. +/// It is the common part shared between `full_crypto` and non-`full_crypto` environments. #[macro_export] -macro_rules! app_crypto_pair { +macro_rules! app_crypto_pair_common { ($pair:ty, $key_type:expr, $crypto_type:expr) => { $crate::wrap! { /// A generic `AppPublic` wrapper type over $pair crypto; this has no specific App. @@ -140,7 +139,14 @@ macro_rules! app_crypto_pair { type Signature = Signature; $crate::app_crypto_pair_functions_if_std!($pair); + $crate::app_crypto_pair_functions_if_full_crypto!($pair); + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, Self::Seed), $crate::SecretStringError> { + <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) + } fn derive<Iter: Iterator<Item = $crate::DeriveJunction>>( &self, path: Iter, @@ -154,9 +160,6 @@ macro_rules! app_crypto_pair { fn from_seed_slice(seed: &[u8]) -> Result<Self, $crate::SecretStringError> { <$pair>::from_seed_slice(seed).map(Self) } - fn sign(&self, msg: &[u8]) -> Self::Signature { - Signature(self.0.sign(msg)) - } fn verify<M: AsRef<[u8]>>( sig: &Self::Signature, message: M, @@ -203,13 +206,6 @@ macro_rules! 
app_crypto_pair_functions_if_std { let r = <$pair>::generate_with_phrase(password); (Self(r.0), r.1, r.2) } - - fn from_phrase( - phrase: &str, - password: Option<&str>, - ) -> Result<(Self, Self::Seed), $crate::SecretStringError> { - <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) - } }; } @@ -220,6 +216,25 @@ macro_rules! app_crypto_pair_functions_if_std { ($pair:ty) => {}; } +/// Implements functions for the `Pair` trait when `feature = "full_crypto"` is enabled. +#[doc(hidden)] +#[cfg(feature = "full_crypto")] +#[macro_export] +macro_rules! app_crypto_pair_functions_if_full_crypto { + ($pair:ty) => { + fn sign(&self, msg: &[u8]) -> Self::Signature { + Signature(self.0.sign(msg)) + } + }; +} + +#[doc(hidden)] +#[cfg(not(feature = "full_crypto"))] +#[macro_export] +macro_rules! app_crypto_pair_functions_if_full_crypto { + ($pair:ty) => {}; +} + /// Declares `Public` type which is functionally equivalent to `$public` but is /// new application-specific type whose identifier is `$key_type`. /// For full functionality, `app_crypto_public_common!` must be called too. @@ -267,7 +282,7 @@ macro_rules! app_crypto_public_not_full_crypto { $crate::wrap! { /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. #[derive( - Clone, Eq, PartialEq, Ord, PartialOrd, + Clone, Eq, Hash, PartialEq, Ord, PartialOrd, $crate::codec::Encode, $crate::codec::Decode, $crate::RuntimeDebug, @@ -277,10 +292,13 @@ macro_rules! app_crypto_public_not_full_crypto { pub struct Public($public); } - impl $crate::CryptoType for Public {} + impl $crate::CryptoType for Public { + type Pair = Pair; + } impl $crate::AppCrypto for Public { type Public = Public; + type Pair = Pair; type Signature = Signature; const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; @@ -452,10 +470,13 @@ macro_rules! 
 app_crypto_signature_not_full_crypto { pub struct Signature($sig); } - impl $crate::CryptoType for Signature {} + impl $crate::CryptoType for Signature { + type Pair = Pair; + } impl $crate::AppCrypto for Signature { type Public = Public; + type Pair = Pair; type Signature = Signature; const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; diff --git a/substrate/primitives/application-crypto/src/sr25519.rs b/substrate/primitives/application-crypto/src/sr25519.rs index 7c91bfa7bb5ffe221a618769f2cb3de3e054d042..d411cc253c0d84fd0bd7f366a7dec86f4c9acc91 100644 --- a/substrate/primitives/application-crypto/src/sr25519.rs +++ b/substrate/primitives/application-crypto/src/sr25519.rs @@ -27,9 +27,7 @@ mod app { crate::app_crypto!(super, sp_core::testing::SR25519); } -#[cfg(feature = "full_crypto")] -pub use app::Pair as AppPair; -pub use app::{Public as AppPublic, Signature as AppSignature}; +pub use app::{Pair as AppPair, Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/traits.rs b/substrate/primitives/application-crypto/src/traits.rs index e9b1080f63d9ce96ced35989cfba56887f03feb9..0b59abf272dc7761f08c8604db33762b07e31584 100644 --- a/substrate/primitives/application-crypto/src/traits.rs +++ b/substrate/primitives/application-crypto/src/traits.rs @@ -18,9 +18,7 @@ use codec::Codec; use scale_info::TypeInfo; -#[cfg(feature = "full_crypto")] -use sp_core::crypto::Pair; -use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Public}; +use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Pair, Public}; use sp_std::{fmt::Debug, vec::Vec}; /// Application-specific cryptographic object. @@ -45,24 +43,14 @@ pub trait AppCrypto: 'static + Sized + CryptoType { type Signature: AppSignature; /// The corresponding key pair type in this application scheme. - #[cfg(feature = "full_crypto")] type Pair: AppPair; } /// Type which implements Hash in std, not when no-std (std variant). -#[cfg(any(feature = "std", feature = "full_crypto"))] pub trait MaybeHash: sp_std::hash::Hash {} -#[cfg(any(feature = "std", feature = "full_crypto"))] impl<T: sp_std::hash::Hash> MaybeHash for T {} -/// Type which implements Hash in std, not when no-std (no-std variant). -#[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] -pub trait MaybeHash {} -#[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] -impl<T> MaybeHash for T {} - /// Application-specific key pair. 
-#[cfg(feature = "full_crypto")] pub trait AppPair: AppCrypto + Pair<Public = <Self as AppCrypto>::Public, Signature = <Self as AppCrypto>::Signature> { diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 92f9d23620ef9d75d4105a3db440e5184d60fc44..301821ad6893117661e439921f23d4d5d48f16b5 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = integer-sqrt = "0.1.2" num-traits = { version = "0.2.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } static_assertions = "1.1.0" sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 26ba41645e52949cb69f4048a14f065243d7756b..9d13d627eebc36dcc5bdce3efd073a7dc29012a0 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -22,7 +22,7 @@ futures = "0.3.21" log = { workspace = true, default-features = true } parking_lot = "0.12.1" schnellru = "0.2.1" -thiserror = "1.0.48" +thiserror = { workspace = true } sp-api = { path = "../api" } sp-consensus = { path = "../consensus/common" } sp-database = { path = "../database" } diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index f6bc94e3adfb5409b95cf364fd665ced0b9b7403..8b3006f79a7ffc87a69a7eb2921f4b4f0533d1a1 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-consensus-slots = { path = "../slots", default-features = false } diff --git a/substrate/primitives/consensus/babe/src/inherents.rs b/substrate/primitives/consensus/babe/src/inherents.rs index 909769f3031be658e758087c4323ed7ec80fdcce..54b7b64401673927602a3e816cc9d5dcb9ede14a 100644 --- a/substrate/primitives/consensus/babe/src/inherents.rs +++ b/substrate/primitives/consensus/babe/src/inherents.rs @@ -17,7 +17,6 @@ //! Inherents for BABE -use core::result::Result; use sp_inherents::{Error, InherentData, InherentIdentifier}; /// The BABE inherent identifier. 
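Returning briefly to the application-crypto changes: since `AppCrypto::Pair` and the `AppPair` trait are no longer feature-gated, generic plumbing over application key pairs can be written once for both std and runtime builds. A hypothetical helper (invented for this note) built on the `AppPair` bound restored above; `verify` lives in the common part of the pair macro, so no `full_crypto` is needed:

```rust
use sp_application_crypto::{AppPair, Pair};

/// Hypothetical: verify a message for any application crypto scheme.
/// Fully-qualified paths are used because both supertraits of `AppPair`
/// declare `Public`/`Signature` associated types.
fn verify_app<P: AppPair>(
    sig: &<P as Pair>::Signature,
    msg: &[u8],
    who: &<P as Pair>::Public,
) -> bool {
    <P as Pair>::verify(sig, msg, who)
}
```

For instance, `verify_app::<sp_application_crypto::sr25519::AppPair>(&sig, b"msg", &key)`.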
diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index d6b2cdd55e0daddf7e49a132d48bbc8c1741736a..ff0b4568226ef129305d6ab516d6b7032c54c2a0 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -256,6 +256,12 @@ pub struct BabeEpochConfiguration { pub allowed_slots: AllowedSlots, } +impl Default for BabeEpochConfiguration { + fn default() -> Self { + Self { c: (1, 4), allowed_slots: AllowedSlots::PrimaryAndSecondaryVRFSlots } + } +} + /// Verifies the equivocation proof by making sure that: both headers have /// different hashes, are targetting the same slot, and have valid signatures by /// the same authority. diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index 38fc67b7f526e8c57caa0a0b4c6f8497ddc3c030..8ab817d52ef93469959c4de17a459a319b07b4e0 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, optional = true, features = ["alloc", "derive"] } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-core = { path = "../../core", default-features = false } diff --git a/substrate/primitives/consensus/beefy/src/commitment.rs b/substrate/primitives/consensus/beefy/src/commitment.rs index 1f0fb34ebf10b3e357429bf9bb501677dbea4695..335c6b604f044872ca5dc20319d0e2de94313606 100644 --- a/substrate/primitives/consensus/beefy/src/commitment.rs +++ b/substrate/primitives/consensus/beefy/src/commitment.rs @@ -97,6 +97,19 @@ pub struct SignedCommitment<TBlockNumber, TSignature> { pub signatures: Vec<Option<TSignature>>, } +impl<TBlockNumber: sp_std::fmt::Debug, TSignature> sp_std::fmt::Display + for SignedCommitment<TBlockNumber, TSignature> +{ + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + let signatures_count = self.signatures.iter().filter(|s| s.is_some()).count(); + write!( + f, + "SignedCommitment(commitment: {:?}, signatures_count: {})", + self.commitment, signatures_count + ) + } +} + impl<TBlockNumber, TSignature> SignedCommitment<TBlockNumber, TSignature> { /// Return the number of collected signatures. 
pub fn no_of_signatures(&self) -> usize { @@ -241,6 +254,14 @@ pub enum VersionedFinalityProof<N, S> { V1(SignedCommitment<N, S>), } +impl<N: sp_std::fmt::Debug, S> sp_std::fmt::Display for VersionedFinalityProof<N, S> { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + match self { + VersionedFinalityProof::V1(sc) => write!(f, "VersionedFinalityProof::V1({})", sc), + } + } +} + impl<N, S> From<SignedCommitment<N, S>> for VersionedFinalityProof<N, S> { fn from(commitment: SignedCommitment<N, S>) -> Self { VersionedFinalityProof::V1(commitment) diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index cbca0d94d3e5ab1172df2dd9e5a4c226e85969de..048e31b0265f57f0404b3777897a27f42fe3de68 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = "0.1.74" futures = { version = "0.3.21", features = ["thread-pool"] } log = { workspace = true, default-features = true } -thiserror = "1.0.48" +thiserror = { workspace = true } sp-core = { path = "../../core" } sp-inherents = { path = "../../inherents" } sp-runtime = { path = "../../runtime" } diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index b556b43bc950b08319398339ba76fd56850484e2..b06208a4308b355031adde70de72e0e136debda5 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false, optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-core = { path = "../../core", default-features = false } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 4c908201127bc024f3ae1a5543ff1680f24a05c9..b707ad18b5b9c6d4649a31a0c8f6a4dd4eeaf1f8 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } sp-consensus-slots = { path = "../slots", default-features = false } diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index fadcd1215eee8f4780457ec50052413512812af0..8372b2b04a6b662cd76a07b6d7245ba9ba46b251 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ 
diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml
index cbca0d94d3e5ab1172df2dd9e5a4c226e85969de..048e31b0265f57f0404b3777897a27f42fe3de68 100644
--- a/substrate/primitives/consensus/common/Cargo.toml
+++ b/substrate/primitives/consensus/common/Cargo.toml
@@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 async-trait = "0.1.74"
 futures = { version = "0.3.21", features = ["thread-pool"] }
 log = { workspace = true, default-features = true }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 sp-core = { path = "../../core" }
 sp-inherents = { path = "../../inherents" }
 sp-runtime = { path = "../../runtime" }
diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml
index b556b43bc950b08319398339ba76fd56850484e2..b06208a4308b355031adde70de72e0e136debda5 100644
--- a/substrate/primitives/consensus/grandpa/Cargo.toml
+++ b/substrate/primitives/consensus/grandpa/Cargo.toml
@@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] }
 log = { workspace = true }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false, optional = true }
+serde = { features = ["alloc", "derive"], optional = true, workspace = true }
 sp-api = { path = "../../api", default-features = false }
 sp-application-crypto = { path = "../../application-crypto", default-features = false }
 sp-core = { path = "../../core", default-features = false }
diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml
index 4c908201127bc024f3ae1a5543ff1680f24a05c9..b707ad18b5b9c6d4649a31a0c8f6a4dd4eeaf1f8 100644
--- a/substrate/primitives/consensus/sassafras/Cargo.toml
+++ b/substrate/primitives/consensus/sassafras/Cargo.toml
@@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", default-features = false, features = ["derive"], optional = true }
+serde = { features = ["derive"], optional = true, workspace = true }
 sp-api = { path = "../../api", default-features = false }
 sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] }
 sp-consensus-slots = { path = "../slots", default-features = false }
diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml
index fadcd1215eee8f4780457ec50052413512812af0..8372b2b04a6b662cd76a07b6d7245ba9ba46b251 100644
--- a/substrate/primitives/consensus/slots/Cargo.toml
+++ b/substrate/primitives/consensus/slots/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0", default-features = false, features = ["alloc", "derive"], optional = true }
+serde = { features = ["alloc", "derive"], optional = true, workspace = true }
 sp-std = { path = "../../std", default-features = false }
 sp-timestamp = { path = "../../timestamp", default-features = false }
diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml
index c06dd9197ed790bbb2c215d79ad607096a2afe14..908f2498de5330c307e164f35c6f9ccfc1aa7b76 100644
--- a/substrate/primitives/core/Cargo.toml
+++ b/substrate/primitives/core/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
 log = { workspace = true }
-serde = { version = "1.0.196", optional = true, default-features = false, features = ["alloc", "derive"] }
+serde = { optional = true, features = ["alloc", "derive"], workspace = true }
 bounded-collections = { version = "0.2.0", default-features = false }
 primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] }
 impl-serde = { version = "0.4.0", default-features = false, optional = true }
@@ -27,10 +27,11 @@ hash-db = { version = "0.16.0", default-features = false }
 hash256-std-hasher = { version = "0.15.2", default-features = false }
 bs58 = { version = "0.5.0", default-features = false, optional = true }
 rand = { version = "0.8.5", features = ["small_rng"], optional = true }
-substrate-bip39 = { version = "0.4.5", optional = true }
-bip39 = { version = "2.0.0", default-features = false }
+substrate-bip39 = { path = "../../utils/substrate-bip39", default-features = false }
+# personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64
+bip39 = { package = "parity-bip39", version = "2.0.1", default-features = false, features = ["alloc"] }
 zeroize = { version = "1.4.3", default-features = false }
-secrecy = { version = "0.8.0", default-features = false }
+secrecy = { version = "0.8.0", default-features = false, features = ["alloc"] }
 parking_lot = { version = "0.12.1", optional = true }
 ss58-registry = { version = "1.34.0", default-features = false }
 sp-std = { path = "../std", default-features = false }
@@ -39,22 +40,25 @@ sp-storage = { path = "../storage", default-features = false }
 sp-externalities = { path = "../externalities", optional = true }
 futures = { version = "0.3.21", optional = true }
 dyn-clonable = { version = "0.9.0", optional = true }
-thiserror = { version = "1.0.48", optional = true }
+thiserror = { optional = true, workspace = true }
 tracing = { version = "0.1.29", optional = true }
 bitflags = "1.3"
 paste = "1.0.7"
 itertools = { version = "0.10.3", optional = true }
 
 # full crypto
-array-bytes = { version = "6.1", optional = true }
-ed25519-zebra = { version = "3.1.0", default-features = false, optional = true }
+array-bytes = { version = "6.1" }
+ed25519-zebra = { version = "3.1.0", default-features = false }
 blake2 = { version = "0.10.4", default-features = false, optional = true }
-libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true }
+libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"] }
 schnorrkel = { version = "0.11.4", features = ["preaudit_deprecated"], default-features = false }
 merlin = { version = "3.0", default-features = false }
-secp256k1 = { version = "0.28.0", default-features = false, features = ["alloc", "recovery"], optional = true }
-sp-crypto-hashing = { path = "../crypto/hashing", default-features = false, optional = true }
+sp-crypto-hashing = { path = "../crypto/hashing", default-features = false }
 sp-runtime-interface = { path = "../runtime-interface", default-features = false }
+# k256 crate, better portability, intended to be used in substrate-runtimes (no-std)
+k256 = { version = "0.13.3", features = ["alloc", "ecdsa"], default-features = false }
+# secp256k1 crate, better performance, intended to be used on host side (std)
+secp256k1 = { version = "0.28.0", default-features = false, features = ["alloc", "recovery"], optional = true }
 
 # bls crypto
 w3f-bls = { version = "0.1.3", default-features = false, optional = true }
@@ -63,7 +67,7 @@ bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "e9782f9",
 
 [dev-dependencies]
 criterion = "0.4.0"
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 lazy_static = "1.4.0"
 regex = "1.6.0"
 
@@ -76,8 +80,8 @@ bench = false
 [features]
 default = ["std"]
+
 std = [
-	"array-bytes",
 	"bandersnatch_vrfs?/std",
 	"bip39/rand",
 	"bip39/std",
@@ -94,6 +98,7 @@ std = [
 	"hash256-std-hasher/std",
 	"impl-serde/std",
 	"itertools",
+	"k256/std",
 	"libsecp256k1/std",
 	"log/std",
 	"merlin/std",
@@ -107,7 +112,6 @@ std = [
 	"schnorrkel/std",
 	"secp256k1/global-context",
 	"secp256k1/std",
-	"secrecy/alloc",
 	"serde/std",
 	"sp-crypto-hashing/std",
 	"sp-debug-derive/std",
@@ -116,7 +120,7 @@ std = [
 	"sp-std/std",
 	"sp-storage/std",
 	"ss58-registry/std",
-	"substrate-bip39",
+	"substrate-bip39/std",
 	"thiserror",
 	"tracing",
 	"w3f-bls?/std",
@@ -126,16 +130,14 @@ std = [
 
 # Serde support without relying on std features.
 serde = [
-	"array-bytes",
 	"blake2",
 	"bounded-collections/serde",
 	"bs58/alloc",
 	"dep:serde",
 	"impl-serde",
+	"k256/serde",
 	"primitive-types/serde_no_std",
 	"scale-info/serde",
-	"secrecy/alloc",
-	"sp-crypto-hashing",
 	"sp-storage/serde",
 ]
 
@@ -143,12 +145,7 @@ serde = [
 # or Intel SGX.
 # For the regular wasm runtime builds this should not be used.
 full_crypto = [
-	"array-bytes",
 	"blake2",
-	"ed25519-zebra",
-	"libsecp256k1",
-	"secp256k1",
-	"sp-crypto-hashing",
 	"sp-runtime-interface/disable_target_static_assertions",
 ]
diff --git a/substrate/primitives/core/check-features-variants.sh b/substrate/primitives/core/check-features-variants.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6d28212065a623096202f9da483aead71806c513
--- /dev/null
+++ b/substrate/primitives/core/check-features-variants.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env -S bash -eux
+
+export RUSTFLAGS="-Cdebug-assertions=y -Dwarnings"
+T=wasm32-unknown-unknown
+
+cargo check --target=$T --release --no-default-features --features="bls-experimental"
+cargo check --target=$T --release --no-default-features --features="full_crypto,bls-experimental"
+cargo check --target=$T --release --no-default-features --features="bandersnatch-experimental"
+cargo check --target=$T --release --no-default-features --features="full_crypto,serde,bandersnatch-experimental"
+cargo check --target=$T --release --no-default-features --features="full_crypto,serde"
+cargo check --target=$T --release --no-default-features --features="full_crypto"
+cargo check --target=$T --release --no-default-features --features="serde"
+cargo check --target=$T --release --no-default-features
diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs
index 211d47c0093d8800646c9a47273128af18edbad5..2e32d0cd86dffea77bb47a2b025d67502a72898b 100644
--- a/substrate/primitives/core/src/address_uri.rs
+++ b/substrate/primitives/core/src/address_uri.rs
@@ -17,7 +17,7 @@
 
 //! Little util for parsing an address URI. Replaces regular expressions.
 
-#[cfg(all(not(feature = "std"), any(feature = "serde", feature = "full_crypto")))]
+#[cfg(not(feature = "std"))]
 use sp_std::{
 	alloc::string::{String, ToString},
 	vec::Vec,
diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs
index 61e7162544a602ba2c5577807cfbc94d143dad22..5cae6047f164e9ae92896e6502a1082061db1a32 100644
--- a/substrate/primitives/core/src/bandersnatch.rs
+++ b/substrate/primitives/core/src/bandersnatch.rs
@@ -22,19 +22,18 @@
 
 #[cfg(feature = "serde")]
 use crate::crypto::Ss58Codec;
+#[cfg(feature = "full_crypto")]
+use crate::crypto::VrfSecret;
 use crate::crypto::{
-	ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, VrfPublic,
+	ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair,
+	Public as TraitPublic, SecretStringError, UncheckedFrom, VrfPublic,
 };
-#[cfg(feature = "full_crypto")]
-use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, VrfSecret};
 #[cfg(feature = "serde")]
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 #[cfg(all(not(feature = "std"), feature = "serde"))]
 use sp_std::alloc::{format, string::String};
 
-use bandersnatch_vrfs::CanonicalSerialize;
-#[cfg(feature = "full_crypto")]
-use bandersnatch_vrfs::SecretKey;
+use bandersnatch_vrfs::{CanonicalSerialize, SecretKey};
 use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
 use scale_info::TypeInfo;
 
@@ -45,10 +44,8 @@ use sp_std::{vec, vec::Vec};
 pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band");
 
 /// Context used to produce a plain signature without any VRF input/output.
-#[cfg(feature = "full_crypto")]
 pub const SIGNING_CTX: &[u8] = b"BandersnatchSigningContext";
 
-#[cfg(feature = "full_crypto")]
 const SEED_SERIALIZED_SIZE: usize = 32;
 
 const PUBLIC_SERIALIZED_SIZE: usize = 33;
@@ -56,7 +53,6 @@ const SIGNATURE_SERIALIZED_SIZE: usize = 65;
 const PREOUT_SERIALIZED_SIZE: usize = 33;
 
 /// Bandersnatch public key.
-#[cfg_attr(feature = "full_crypto", derive(Hash))]
 #[derive(
 	Clone,
 	Copy,
@@ -69,6 +65,7 @@ const PREOUT_SERIALIZED_SIZE: usize = 33;
 	PassByInner,
 	MaxEncodedLen,
 	TypeInfo,
+	Hash,
 )]
 pub struct Public(pub [u8; PUBLIC_SERIALIZED_SIZE]);
 
@@ -116,7 +113,6 @@ impl ByteArray for Public {
 impl TraitPublic for Public {}
 
 impl CryptoType for Public {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair;
 }
 
@@ -154,8 +150,9 @@ impl<'de> Deserialize<'de> for Public {
 ///
 /// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as transcript
 /// `label`.
-#[cfg_attr(feature = "full_crypto", derive(Hash))]
-#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo)]
+#[derive(
+	Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo, Hash,
+)]
 pub struct Signature([u8; SIGNATURE_SERIALIZED_SIZE]);
 
 impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature {
@@ -194,7 +191,6 @@ impl ByteArray for Signature {
 }
 
 impl CryptoType for Signature {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair;
 }
 
@@ -211,18 +207,15 @@ impl sp_std::fmt::Debug for Signature {
 }
 
 /// The raw secret seed, which can be used to reconstruct the secret [`Pair`].
-#[cfg(feature = "full_crypto")]
 type Seed = [u8; SEED_SERIALIZED_SIZE];
 
 /// Bandersnatch secret key.
-#[cfg(feature = "full_crypto")]
 #[derive(Clone)]
 pub struct Pair {
 	secret: SecretKey,
 	seed: Seed,
 }
 
-#[cfg(feature = "full_crypto")]
 impl Pair {
 	/// Get the key seed.
 	pub fn seed(&self) -> Seed {
@@ -230,7 +223,6 @@ impl Pair {
 	}
 }
 
-#[cfg(feature = "full_crypto")]
 impl TraitPair for Pair {
 	type Seed = Seed;
 	type Public = Public;
@@ -287,6 +279,7 @@ impl TraitPair for Pair {
 	/// the constant label [`SIGNING_CTX`] and `data` without any additional data.
 	///
 	/// See [`vrf::VrfSignData`] for additional details.
+	#[cfg(feature = "full_crypto")]
 	fn sign(&self, data: &[u8]) -> Signature {
 		let data = vrf::VrfSignData::new_unchecked(SIGNING_CTX, &[data], None);
 		self.vrf_sign(&data).signature
@@ -305,7 +298,6 @@ impl TraitPair for Pair {
 	}
 }
 
-#[cfg(feature = "full_crypto")]
 impl CryptoType for Pair {
 	type Pair = Pair;
 }
diff --git a/substrate/primitives/core/src/bls.rs b/substrate/primitives/core/src/bls.rs
index 0c84d0ba8e6c0fd295690ca5fddf48440f083379..2256e4cd823dc3184f7fc86adb3ddc0f01e0dd7a 100644
--- a/substrate/primitives/core/src/bls.rs
+++ b/substrate/primitives/core/src/bls.rs
@@ -25,11 +25,11 @@
 
 #[cfg(feature = "serde")]
 use crate::crypto::Ss58Codec;
-use crate::crypto::{ByteArray, CryptoType, Derive, Public as TraitPublic, UncheckedFrom};
-#[cfg(feature = "full_crypto")]
-use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError};
+use crate::crypto::{
+	ByteArray, CryptoType, Derive, DeriveError, DeriveJunction, Pair as TraitPair,
+	Public as TraitPublic, SecretStringError, UncheckedFrom,
+};
 
-#[cfg(feature = "full_crypto")]
 use sp_std::vec::Vec;
 
 use codec::{Decode, Encode, MaxEncodedLen};
@@ -40,9 +40,10 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 #[cfg(all(not(feature = "std"), feature = "serde"))]
 use sp_std::alloc::{format, string::String};
 
-use w3f_bls::{DoublePublicKey, DoubleSignature, EngineBLS, SerializableToBytes, TinyBLS381};
-#[cfg(feature = "full_crypto")]
-use w3f_bls::{DoublePublicKeyScheme, Keypair, Message, SecretKey};
+use w3f_bls::{
+	DoublePublicKey, DoublePublicKeyScheme, DoubleSignature, EngineBLS, Keypair, Message,
+	SecretKey, SerializableToBytes, TinyBLS381,
+};
 
 use sp_runtime_interface::pass_by::{self, PassBy, PassByInner};
 use sp_std::{convert::TryFrom, marker::PhantomData, ops::Deref};
@@ -57,7 +58,6 @@ pub mod bls377 {
 	pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"bls7");
 
 	/// BLS12-377 key pair.
-	#[cfg(feature = "full_crypto")]
 	pub type Pair = super::Pair<TinyBLS377>;
 	/// BLS12-377 public key.
 	pub type Public = super::Public<TinyBLS377>;
@@ -79,7 +79,6 @@ pub mod bls381 {
 	pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"bls8");
 
 	/// BLS12-381 key pair.
-	#[cfg(feature = "full_crypto")]
 	pub type Pair = super::Pair<TinyBLS381>;
 	/// BLS12-381 public key.
 	pub type Public = super::Public<TinyBLS381>;
@@ -96,7 +95,6 @@ trait BlsBound: EngineBLS + HardJunctionId + Send + Sync + 'static {}
 impl<T: EngineBLS + HardJunctionId + Send + Sync + 'static> BlsBound for T {}
 
 /// Secret key serialized size
-#[cfg(feature = "full_crypto")]
 const SECRET_KEY_SERIALIZED_SIZE: usize =
 	<SecretKey<TinyBLS381> as SerializableToBytes>::SERIALIZED_BYTES_SIZE;
 
@@ -113,7 +111,6 @@ pub const SIGNATURE_SERIALIZED_SIZE: usize =
 /// It's not called a "secret key" because ring doesn't expose the secret keys
 /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we
 /// will need it later (such as for HDKD).
-#[cfg(feature = "full_crypto")]
 type Seed = [u8; SECRET_KEY_SERIALIZED_SIZE];
 
 /// A public key.
@@ -150,7 +147,6 @@ impl<T> Ord for Public<T> {
 	}
 }
 
-#[cfg(feature = "full_crypto")]
 impl<T> sp_std::hash::Hash for Public<T> {
 	fn hash<H: sp_std::hash::Hasher>(&self, state: &mut H) {
 		self.inner.hash(state)
@@ -226,7 +222,6 @@ impl<T> From<Public<T>> for [u8; PUBLIC_KEY_SERIALIZED_SIZE] {
 	}
 }
 
-#[cfg(feature = "full_crypto")]
 impl<T: BlsBound> From<Pair<T>> for Public<T> {
 	fn from(x: Pair<T>) -> Self {
 		x.public()
@@ -296,7 +291,6 @@ impl<T: BlsBound> TraitPublic for Public<T> {}
 impl<T> Derive for Public<T> {}
 
 impl<T: BlsBound> CryptoType for Public<T> {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair<T>;
 }
 
@@ -322,7 +316,6 @@ impl<T> PartialEq for Signature<T> {
 
 impl<T> Eq for Signature<T> {}
 
-#[cfg(feature = "full_crypto")]
 impl<T> sp_std::hash::Hash for Signature<T> {
 	fn hash<H: sp_std::hash::Hasher>(&self, state: &mut H) {
 		self.inner.hash(state)
@@ -412,15 +405,12 @@ impl<T> UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature<T> {
 }
 
 impl<T: BlsBound> CryptoType for Signature<T> {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair<T>;
 }
 
 /// A key pair.
-#[cfg(feature = "full_crypto")]
 pub struct Pair<T: EngineBLS>(Keypair<T>);
 
-#[cfg(feature = "full_crypto")]
 impl<T: EngineBLS> Clone for Pair<T> {
 	fn clone(&self) -> Self {
 		Pair(self.0.clone())
@@ -432,15 +422,12 @@ trait HardJunctionId {
 }
 
 /// Derive a single hard junction.
-#[cfg(feature = "full_crypto")]
 fn derive_hard_junction<T: HardJunctionId>(secret_seed: &Seed, cc: &[u8; 32]) -> Seed {
 	(T::ID, secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256)
 }
 
-#[cfg(feature = "full_crypto")]
 impl<T: BlsBound> Pair<T> {}
 
-#[cfg(feature = "full_crypto")]
 impl<T: BlsBound> TraitPair for Pair<T> {
 	type Seed = Seed;
 	type Public = Public<T>;
@@ -480,6 +467,7 @@ impl<T: BlsBound> TraitPair for Pair<T> {
 		Self::Public::unchecked_from(raw)
 	}
 
+	#[cfg(feature = "full_crypto")]
 	fn sign(&self, message: &[u8]) -> Self::Signature {
 		let mut mutable_self = self.clone();
 		let r: [u8; SIGNATURE_SERIALIZED_SIZE] =
@@ -523,7 +511,6 @@ impl<T: BlsBound> TraitPair for Pair<T> {
 	}
 }
 
-#[cfg(feature = "full_crypto")]
 impl<T: BlsBound> CryptoType for Pair<T> {
 	type Pair = Pair<T>;
 }
diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs
index 2a8be2a2ba8503d7dca2d324e405f6a1af840247..fab865d8c1f4215ccc3fbf38f19543117d5a52c2 100644
--- a/substrate/primitives/core/src/crypto.rs
+++ b/substrate/primitives/core/src/crypto.rs
@@ -18,7 +18,6 @@
 //! Cryptographic utilities.
 
 use crate::{ed25519, sr25519};
-#[cfg(feature = "std")]
 use bip39::{Language, Mnemonic};
 use codec::{Decode, Encode, MaxEncodedLen};
 #[cfg(feature = "std")]
@@ -26,7 +25,6 @@ use itertools::Itertools;
 #[cfg(feature = "std")]
 use rand::{rngs::OsRng, RngCore};
 use scale_info::TypeInfo;
-#[cfg(feature = "std")]
 pub use secrecy::{ExposeSecret, SecretString};
 use sp_runtime_interface::pass_by::PassByInner;
 #[doc(hidden)]
@@ -41,10 +39,7 @@ pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58AddressFormatRegistry};
 /// Trait to zeroize a memory buffer.
 pub use zeroize::Zeroize;
 
-#[cfg(feature = "std")]
-pub use crate::address_uri::AddressUri;
-#[cfg(any(feature = "std", feature = "full_crypto"))]
-pub use crate::address_uri::Error as AddressUriError;
+pub use crate::address_uri::{AddressUri, Error as AddressUriError};
 
 /// The root phrase for our publicly known keys.
 pub const DEV_PHRASE: &str =
@@ -82,7 +77,6 @@ impl<S: UncheckedFrom<T>, T> UncheckedInto<T> for S {
 /// An error with the interpretation of a secret.
 #[cfg_attr(feature = "std", derive(thiserror::Error))]
 #[derive(Debug, Clone, PartialEq, Eq)]
-#[cfg(feature = "full_crypto")]
 pub enum SecretStringError {
 	/// The overall format was invalid (e.g. the seed phrase contained symbols).
 	#[cfg_attr(feature = "std", error("Invalid format {0}"))]
@@ -104,7 +98,6 @@ pub enum SecretStringError {
 	InvalidPath,
 }
 
-#[cfg(any(feature = "std", feature = "full_crypto"))]
 impl From<AddressUriError> for SecretStringError {
 	fn from(e: AddressUriError) -> Self {
 		Self::InvalidFormat(e)
@@ -114,7 +107,6 @@ impl From<AddressUriError> for SecretStringError {
 /// An error when deriving a key.
 #[cfg_attr(feature = "std", derive(thiserror::Error))]
 #[derive(Debug, Clone, PartialEq, Eq)]
-#[cfg(feature = "full_crypto")]
 pub enum DeriveError {
 	/// A soft key was found in the path (and is unsupported).
 	#[cfg_attr(feature = "std", error("Soft key in path"))]
@@ -125,7 +117,6 @@ pub enum DeriveError {
 /// a new secret key from an existing secret key and, in the case of `SoftRaw` and `SoftIndex`
 /// a new public key from an existing public key.
 #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Encode, Decode)]
-#[cfg(any(feature = "full_crypto", feature = "serde"))]
 pub enum DeriveJunction {
 	/// Soft (vanilla) derivation. Public keys have a correspondent derivation.
 	Soft([u8; JUNCTION_ID_LEN]),
@@ -133,7 +124,6 @@ pub enum DeriveJunction {
 	Hard([u8; JUNCTION_ID_LEN]),
 }
 
-#[cfg(any(feature = "full_crypto", feature = "serde"))]
 impl DeriveJunction {
 	/// Consume self to return a soft derive junction with the same chain code.
 	pub fn soften(self) -> Self {
@@ -192,7 +182,6 @@ impl DeriveJunction {
 	}
 }
 
-#[cfg(any(feature = "full_crypto", feature = "serde"))]
 impl<T: AsRef<str>> From<T> for DeriveJunction {
 	fn from(j: T) -> DeriveJunction {
 		let j = j.as_ref();
@@ -812,7 +801,6 @@ mod dummy {
 /// assert_eq!("0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a", suri.phrase.expose_secret());
 /// assert!(suri.password.is_none());
 /// ```
-#[cfg(feature = "std")]
 pub struct SecretUri {
 	/// The phrase to derive the private key.
 	///
@@ -824,7 +812,6 @@ pub struct SecretUri {
 	pub junctions: Vec<DeriveJunction>,
 }
 
-#[cfg(feature = "std")]
 impl sp_std::str::FromStr for SecretUri {
 	type Err = SecretStringError;
 
@@ -845,7 +832,6 @@ impl sp_std::str::FromStr for SecretUri {
 /// Trait suitable for typical cryptographic PKI key pair type.
 ///
 /// For now it just specifies how to create a key from a phrase and derivation path.
-#[cfg(feature = "full_crypto")]
 pub trait Pair: CryptoType + Sized {
 	/// The type which is used to encode a public key.
 	type Public: Public + Hash;
@@ -878,21 +864,19 @@ pub trait Pair: CryptoType + Sized {
 	#[cfg(feature = "std")]
 	fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) {
 		let mnemonic = Mnemonic::generate(12).expect("Mnemonic generation always works; qed");
-		let phrase = mnemonic.word_iter().join(" ");
+		let phrase = mnemonic.words().join(" ");
 		let (pair, seed) = Self::from_phrase(&phrase, password)
 			.expect("All phrases generated by Mnemonic are valid; qed");
 		(pair, phrase.to_owned(), seed)
 	}
 
 	/// Returns the KeyPair from the English BIP39 seed `phrase`, or an error if it's invalid.
-	#[cfg(feature = "std")]
 	fn from_phrase(
 		phrase: &str,
 		password: Option<&str>,
 	) -> Result<(Self, Self::Seed), SecretStringError> {
 		let mnemonic = Mnemonic::parse_in(Language::English, phrase)
 			.map_err(|_| SecretStringError::InvalidPhrase)?;
-
 		let (entropy, entropy_len) = mnemonic.to_entropy_array();
 		let big_seed =
 			substrate_bip39::seed_from_entropy(&entropy[0..entropy_len], password.unwrap_or(""))
@@ -928,6 +912,7 @@ pub trait Pair: CryptoType + Sized {
 	fn from_seed_slice(seed: &[u8]) -> Result<Self, SecretStringError>;
 
 	/// Sign a message.
+	#[cfg(feature = "full_crypto")]
 	fn sign(&self, message: &[u8]) -> Self::Signature;
 
 	/// Verify a signature on a message. Returns true if the signature is good.
@@ -962,7 +947,6 @@ pub trait Pair: CryptoType + Sized {
 	/// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros.
 	/// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will
 	/// generally be equivalent to no password at all.
-	#[cfg(feature = "std")]
 	fn from_string_with_seed(
 		s: &str,
 		password_override: Option<&str>,
@@ -996,7 +980,6 @@ pub trait Pair: CryptoType + Sized {
 	/// Interprets the string `s` in order to generate a key pair.
 	///
 	/// See [`from_string_with_seed`](Pair::from_string_with_seed) for more extensive documentation.
-	#[cfg(feature = "std")]
 	fn from_string(s: &str, password_override: Option<&str>) -> Result<Self, SecretStringError> {
 		Self::from_string_with_seed(s, password_override).map(|x| x.0)
 	}
@@ -1054,7 +1037,6 @@ where
 /// Type which has a particular kind of crypto associated with it.
 pub trait CryptoType {
 	/// The pair key type of this crypto.
-	#[cfg(feature = "full_crypto")]
 	type Pair: Pair;
}
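With the `std` gates removed above, `SecretUri` parsing is now reachable from `no_std` code as well. A small sketch of the parsing path described in the doc comment, run under `std` for brevity (names as exported from `sp_core::crypto`):

```rust
// Sketch: splitting a SURI into phrase, junctions and password.
use sp_core::crypto::{SecretUri, DEV_PHRASE};
use std::str::FromStr;

fn main() {
	let suri = SecretUri::from_str(&format!("{}//Alice", DEV_PHRASE)).expect("valid SURI");
	assert_eq!(suri.junctions.len(), 1); // `//Alice` is a single hard junction
	assert!(suri.password.is_none()); // no `///password` segment was given
}
```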
diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs
index f172b3a7d02c7a1adb1ff2fc37933d3cf47a165f..fa071e1b03ff8df61d79d0815c5979bb4fc95489 100644
--- a/substrate/primitives/core/src/ecdsa.rs
+++ b/substrate/primitives/core/src/ecdsa.rs
@@ -24,24 +24,22 @@ use sp_runtime_interface::pass_by::PassByInner;
 #[cfg(feature = "serde")]
 use crate::crypto::Ss58Codec;
 use crate::crypto::{
-	ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom,
+	ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair,
+	Public as TraitPublic, SecretStringError, UncheckedFrom,
 };
-#[cfg(feature = "full_crypto")]
-use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError};
-#[cfg(all(feature = "full_crypto", not(feature = "std")))]
-use secp256k1::Secp256k1;
+
+#[cfg(not(feature = "std"))]
+use k256::ecdsa::{SigningKey as SecretKey, VerifyingKey};
 #[cfg(feature = "std")]
-use secp256k1::SECP256K1;
-#[cfg(feature = "full_crypto")]
 use secp256k1::{
 	ecdsa::{RecoverableSignature, RecoveryId},
-	Message, PublicKey, SecretKey,
+	Message, PublicKey, SecretKey, SECP256K1,
 };
 #[cfg(feature = "serde")]
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 #[cfg(all(not(feature = "std"), feature = "serde"))]
 use sp_std::alloc::{format, string::String};
-#[cfg(feature = "full_crypto")]
+#[cfg(not(feature = "std"))]
 use sp_std::vec::Vec;
 
 /// An identifier used to match public keys against ecdsa keys
@@ -53,14 +51,12 @@ pub const PUBLIC_KEY_SERIALIZED_SIZE: usize = 33;
 /// The byte length of signature
 pub const SIGNATURE_SERIALIZED_SIZE: usize = 65;
 
-/// A secret seed (which is bytewise essentially equivalent to a SecretKey).
+/// The secret seed.
 ///
-/// We need it as a different type because `Seed` is expected to be AsRef<[u8]>.
-#[cfg(feature = "full_crypto")]
+/// The raw secret seed, which can be used to create the `Pair`.
 type Seed = [u8; 32];
 
 /// The ECDSA compressed public key.
-#[cfg_attr(feature = "full_crypto", derive(Hash))]
 #[derive(
 	Clone,
 	Copy,
@@ -73,6 +69,7 @@ type Seed = [u8; 32];
 	PartialEq,
 	PartialOrd,
 	Ord,
+	Hash,
 )]
 pub struct Public(pub [u8; PUBLIC_KEY_SERIALIZED_SIZE]);
 
@@ -96,18 +93,21 @@ impl Public {
 	/// Create a new instance from the given full public key.
 	///
 	/// This will convert the full public key into the compressed format.
-	#[cfg(feature = "std")]
 	pub fn from_full(full: &[u8]) -> Result<Self, ()> {
-		let pubkey = if full.len() == 64 {
+		let mut tagged_full = [0u8; 65];
+		let full = if full.len() == 64 {
 			// Tag it as uncompressed public key.
-			let mut tagged_full = [0u8; 65];
 			tagged_full[0] = 0x04;
 			tagged_full[1..].copy_from_slice(full);
-			secp256k1::PublicKey::from_slice(&tagged_full)
+			&tagged_full
 		} else {
-			secp256k1::PublicKey::from_slice(full)
+			full
 		};
-		pubkey.map(|k| Self(k.serialize())).map_err(|_| ())
+		#[cfg(feature = "std")]
+		let pubkey = PublicKey::from_slice(&full);
+		#[cfg(not(feature = "std"))]
+		let pubkey = VerifyingKey::from_sec1_bytes(&full);
+		pubkey.map(|k| k.into()).map_err(|_| ())
 	}
 }
 
@@ -131,6 +131,24 @@ impl AsMut<[u8]> for Public {
 	}
 }
 
+#[cfg(feature = "std")]
+impl From<PublicKey> for Public {
+	fn from(pubkey: PublicKey) -> Self {
+		Self(pubkey.serialize())
+	}
+}
+
+#[cfg(not(feature = "std"))]
+impl From<VerifyingKey> for Public {
+	fn from(pubkey: VerifyingKey) -> Self {
+		Self::unchecked_from(
+			pubkey.to_sec1_bytes()[..]
+				.try_into()
+				.expect("valid key is serializable to [u8,33]. qed."),
+		)
+	}
+}
+
 impl TryFrom<&[u8]> for Public {
 	type Error = ();
 
@@ -199,8 +217,7 @@ impl<'de> Deserialize<'de> for Public {
 }
 
 /// A signature (a 512-bit value, plus 8 bits for recovery ID).
-#[cfg_attr(feature = "full_crypto", derive(Hash))]
-#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)]
+#[derive(Hash, Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)]
 pub struct Signature(pub [u8; SIGNATURE_SERIALIZED_SIZE]);
 
 impl ByteArray for Signature {
@@ -323,31 +340,40 @@ impl Signature {
 	}
 
 	/// Recover the public key from this signature and a message.
-	#[cfg(feature = "full_crypto")]
 	pub fn recover<M: AsRef<[u8]>>(&self, message: M) -> Option<Public> {
 		self.recover_prehashed(&sp_crypto_hashing::blake2_256(message.as_ref()))
 	}
 
 	/// Recover the public key from this signature and a pre-hashed message.
-	#[cfg(feature = "full_crypto")]
 	pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option<Public> {
-		let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?;
-		let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?;
-		let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed");
-
 		#[cfg(feature = "std")]
-		let context = SECP256K1;
+		{
+			let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?;
+			let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?;
+			let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed");
+			SECP256K1.recover_ecdsa(&message, &sig).ok().map(Public::from)
+		}
+
 		#[cfg(not(feature = "std"))]
-		let context = Secp256k1::verification_only();
+		{
+			let rid = k256::ecdsa::RecoveryId::from_byte(self.0[64])?;
+			let sig = k256::ecdsa::Signature::from_bytes((&self.0[..64]).into()).ok()?;
+			VerifyingKey::recover_from_prehash(message, &sig, rid).map(Public::from).ok()
+		}
+	}
+}
 
-		context
-			.recover_ecdsa(&message, &sig)
-			.ok()
-			.map(|pubkey| Public(pubkey.serialize()))
+#[cfg(not(feature = "std"))]
+impl From<(k256::ecdsa::Signature, k256::ecdsa::RecoveryId)> for Signature {
+	fn from(recsig: (k256::ecdsa::Signature, k256::ecdsa::RecoveryId)) -> Signature {
+		let mut r = Self::default();
+		r.0[..64].copy_from_slice(&recsig.0.to_bytes());
+		r.0[64] = recsig.1.to_byte();
+		r
 	}
 }
 
-#[cfg(feature = "full_crypto")]
+#[cfg(feature = "std")]
 impl From<RecoverableSignature> for Signature {
 	fn from(recsig: RecoverableSignature) -> Signature {
 		let mut r = Self::default();
@@ -360,20 +386,17 @@ impl From<RecoverableSignature> for Signature {
 }
 
 /// Derive a single hard junction.
-#[cfg(feature = "full_crypto")]
 fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed {
 	("Secp256k1HDKD", secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256)
 }
 
 /// A key pair.
-#[cfg(feature = "full_crypto")]
 #[derive(Clone)]
 pub struct Pair {
 	public: Public,
 	secret: SecretKey,
 }
 
-#[cfg(feature = "full_crypto")]
 impl TraitPair for Pair {
 	type Public = Public;
 	type Seed = Seed;
@@ -384,17 +407,19 @@ impl TraitPair for Pair {
 	///
 	/// You should never need to use this; generate(), generate_with_phrase
 	fn from_seed_slice(seed_slice: &[u8]) -> Result<Self, SecretStringError> {
-		let secret =
-			SecretKey::from_slice(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?;
-
 		#[cfg(feature = "std")]
-		let context = SECP256K1;
-		#[cfg(not(feature = "std"))]
-		let context = Secp256k1::signing_only();
+		{
+			let secret = SecretKey::from_slice(seed_slice)
+				.map_err(|_| SecretStringError::InvalidSeedLength)?;
+			Ok(Pair { public: PublicKey::from_secret_key(&SECP256K1, &secret).into(), secret })
+		}
 
-		let public = PublicKey::from_secret_key(&context, &secret);
-		let public = Public(public.serialize());
-		Ok(Pair { public, secret })
+		#[cfg(not(feature = "std"))]
+		{
+			let secret = SecretKey::from_slice(seed_slice)
+				.map_err(|_| SecretStringError::InvalidSeedLength)?;
+			Ok(Pair { public: VerifyingKey::from(&secret).into(), secret })
+		}
 	}
 
 	/// Derive a child key from a series of given junctions.
@@ -419,6 +444,7 @@ impl TraitPair for Pair {
 	}
 
 	/// Sign a message.
+	#[cfg(feature = "full_crypto")]
 	fn sign(&self, message: &[u8]) -> Signature {
 		self.sign_prehashed(&sp_crypto_hashing::blake2_256(message))
 	}
@@ -434,11 +460,17 @@ impl TraitPair for Pair {
 	}
 }
 
-#[cfg(feature = "full_crypto")]
 impl Pair {
 	/// Get the seed for this key.
 	pub fn seed(&self) -> Seed {
-		self.secret.secret_bytes()
+		#[cfg(feature = "std")]
+		{
+			self.secret.secret_bytes()
+		}
+		#[cfg(not(feature = "std"))]
+		{
+			self.secret.to_bytes().into()
+		}
 	}
 
 	/// Exactly as `from_string` except that if no matches are found then the first 32
@@ -454,15 +486,21 @@ impl Pair {
 	}
 
 	/// Sign a pre-hashed message
+	#[cfg(feature = "full_crypto")]
 	pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature {
-		let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed");
-
 		#[cfg(feature = "std")]
-		let context = SECP256K1;
-		#[cfg(not(feature = "std"))]
-		let context = Secp256k1::signing_only();
+		{
+			let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed");
+			SECP256K1.sign_ecdsa_recoverable(&message, &self.secret).into()
+		}
 
-		context.sign_ecdsa_recoverable(&message, &self.secret).into()
+		#[cfg(not(feature = "std"))]
+		{
+			self.secret
+				.sign_prehash_recoverable(message)
+				.expect("signing may not fail (???). qed.")
+				.into()
+		}
 	}
 
 	/// Verify a signature on a pre-hashed message. Return `true` if the signature is valid
@@ -503,7 +541,7 @@ impl Pair {
 // NOTE: this solution is not effective when `Pair` is moved around memory.
 // The very same problem affects other cryptographic backends that are just using
 // `zeroize` for their secrets.
-#[cfg(feature = "full_crypto")]
+#[cfg(feature = "std")]
 impl Drop for Pair {
 	fn drop(&mut self) {
 		self.secret.non_secure_erase()
@@ -511,16 +549,13 @@ impl Drop for Pair {
 }
 
 impl CryptoType for Public {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair;
 }
 
 impl CryptoType for Signature {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair;
 }
 
-#[cfg(feature = "full_crypto")]
 impl CryptoType for Pair {
 	type Pair = Pair;
 }
@@ -770,8 +805,18 @@ mod test {
 		let msg = [0u8; 32];
 		let sig1 = pair.sign_prehashed(&msg);
 		let sig2: Signature = {
-			let message = Message::from_digest_slice(&msg).unwrap();
-			SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into()
+			#[cfg(feature = "std")]
+			{
+				let message = Message::from_digest_slice(&msg).unwrap();
+				SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into()
+			}
+			#[cfg(not(feature = "std"))]
+			{
+				pair.secret
+					.sign_prehash_recoverable(&msg)
+					.expect("signing may not fail (???). qed.")
+					.into()
+			}
 		};
 
 		assert_eq!(sig1, sig2);
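The secp256k1 (std, host-side) and k256 (no_std, runtime-side) backends deliberately expose identical behaviour, so a sign/recover round trip is a quick way to see the split in action. A sketch, assuming an sp-core build with the `full_crypto` feature enabled (which now gates `sign_prehashed`):

```rust
// Sketch: ECDSA sign + public-key recovery; the result is the same on the
// secp256k1 (std) and k256 (no_std) code paths.
use sp_core::{crypto::Pair as _, ecdsa};

fn main() {
	let pair = ecdsa::Pair::from_seed(&[42u8; 32]);
	let prehashed = sp_crypto_hashing::blake2_256(b"payload");
	let sig = pair.sign_prehashed(&prehashed);
	// Recovery yields the compressed 33-byte public key on either backend.
	assert_eq!(sig.recover_prehashed(&prehashed), Some(pair.public()));
}
```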
diff --git a/substrate/primitives/core/src/ed25519.rs b/substrate/primitives/core/src/ed25519.rs
index 60ebd93e12d43d6d331c6758508f843145031e74..9f42b36dc8bd562022fa7443db0fcbbae3687c10 100644
--- a/substrate/primitives/core/src/ed25519.rs
+++ b/substrate/primitives/core/src/ed25519.rs
@@ -19,7 +19,6 @@
 //! Simple Ed25519 API.
 // end::description[]
 
-#[cfg(feature = "full_crypto")]
 use sp_std::vec::Vec;
 
 use crate::{
@@ -32,13 +31,11 @@ use scale_info::TypeInfo;
 #[cfg(feature = "serde")]
 use crate::crypto::Ss58Codec;
 use crate::crypto::{
-	CryptoType, CryptoTypeId, Derive, FromEntropy, Public as TraitPublic, UncheckedFrom,
+	CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, FromEntropy, Pair as TraitPair,
+	Public as TraitPublic, SecretStringError, UncheckedFrom,
 };
-#[cfg(feature = "full_crypto")]
-use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError};
-#[cfg(feature = "full_crypto")]
 use core::convert::TryFrom;
-#[cfg(feature = "full_crypto")]
 use ed25519_zebra::{SigningKey, VerificationKey};
 #[cfg(feature = "serde")]
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
@@ -53,11 +50,9 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25");
 /// A secret seed. It's not called a "secret key" because ring doesn't expose the secret keys
 /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we
 /// will need it later (such as for HDKD).
-#[cfg(feature = "full_crypto")]
 type Seed = [u8; 32];
 
 /// A public key.
-#[cfg_attr(feature = "full_crypto", derive(Hash))]
 #[derive(
 	PartialEq,
 	Eq,
@@ -70,11 +65,11 @@ type Seed = [u8; 32];
 	PassByInner,
 	MaxEncodedLen,
 	TypeInfo,
+	Hash,
 )]
 pub struct Public(pub [u8; 32]);
 
 /// A key pair.
-#[cfg(feature = "full_crypto")]
 #[derive(Copy, Clone)]
 pub struct Pair {
 	public: VerificationKey,
@@ -210,8 +205,7 @@ impl<'de> Deserialize<'de> for Public {
 }
 
 /// A signature (a 512-bit value).
-#[cfg_attr(feature = "full_crypto", derive(Hash))]
-#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)]
+#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq, Hash)]
 pub struct Signature(pub [u8; 64]);
 
 impl TryFrom<&[u8]> for Signature {
@@ -370,12 +364,10 @@ impl TraitPublic for Public {}
 impl Derive for Public {}
 
 /// Derive a single hard junction.
-#[cfg(feature = "full_crypto")]
 fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed {
 	("Ed25519HDKD", secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256)
 }
 
-#[cfg(feature = "full_crypto")]
 impl TraitPair for Pair {
 	type Public = Public;
 	type Seed = Seed;
@@ -414,6 +406,7 @@ impl TraitPair for Pair {
 	}
 
 	/// Sign a message.
+	#[cfg(feature = "full_crypto")]
 	fn sign(&self, message: &[u8]) -> Signature {
 		Signature::from_raw(self.secret.sign(message).into())
 	}
@@ -433,7 +426,6 @@ impl TraitPair for Pair {
 	}
 }
 
-#[cfg(feature = "full_crypto")]
 impl Pair {
 	/// Get the seed for this key.
 	pub fn seed(&self) -> Seed {
@@ -454,16 +446,13 @@ impl Pair {
 }
 
 impl CryptoType for Public {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair;
 }
 
 impl CryptoType for Signature {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair;
 }
 
-#[cfg(feature = "full_crypto")]
 impl CryptoType for Pair {
 	type Pair = Pair;
 }
diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs
index 0d43eea99629915715f452da409d53522d199e53..c0b41234460dad301056ae542eaa2721522487fc 100644
--- a/substrate/primitives/core/src/lib.rs
+++ b/substrate/primitives/core/src/lib.rs
@@ -46,7 +46,6 @@ pub use sp_debug_derive::RuntimeDebug;
 #[cfg(feature = "serde")]
 pub use impl_serde::serialize as bytes;
 
-#[cfg(feature = "full_crypto")]
 #[deprecated(
 	since = "27.0.0",
 	note = "`sp-crypto-hashing` re-exports will be removed after June 2024. Use `sp-crypto-hashing` instead."
@@ -58,7 +57,6 @@ pub mod crypto;
 pub mod hexdisplay;
 pub use paste;
 
-#[cfg(any(feature = "full_crypto", feature = "std"))]
 mod address_uri;
 #[cfg(feature = "bandersnatch-experimental")]
 pub mod bandersnatch;
@@ -87,7 +85,6 @@ pub use self::{
 	hash::{convert_hash, H160, H256, H512},
 	uint::{U256, U512},
 };
-#[cfg(feature = "full_crypto")]
 pub use crypto::{ByteArray, DeriveJunction, Pair, Public};
 
 #[cfg(feature = "std")]
diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs
index 20b32c339bd763d4847f4d36626635a4226f80d9..6d2e6ddf3348bf34443e0fc7fa96f75e2ba55467 100644
--- a/substrate/primitives/core/src/paired_crypto.rs
+++ b/substrate/primitives/core/src/paired_crypto.rs
@@ -19,11 +19,11 @@
 
 #[cfg(feature = "serde")]
 use crate::crypto::Ss58Codec;
-use crate::crypto::{ByteArray, CryptoType, Derive, Public as PublicT, UncheckedFrom};
-#[cfg(feature = "full_crypto")]
-use crate::crypto::{DeriveError, DeriveJunction, Pair as PairT, SecretStringError};
+use crate::crypto::{
+	ByteArray, CryptoType, Derive, DeriveError, DeriveJunction, Pair as PairT, Public as PublicT,
+	SecretStringError, UncheckedFrom,
+};
 
-#[cfg(feature = "full_crypto")]
 use sp_std::vec::Vec;
 
 use codec::{Decode, Encode, MaxEncodedLen};
@@ -39,12 +39,11 @@ use sp_std::convert::TryFrom;
 /// ECDSA and BLS12-377 paired crypto scheme
 #[cfg(feature = "bls-experimental")]
 pub mod ecdsa_bls377 {
+	use crate::{bls377, crypto::CryptoTypeId, ecdsa};
 	#[cfg(feature = "full_crypto")]
-	use crate::Hasher;
 	use crate::{
-		bls377,
-		crypto::{CryptoTypeId, Pair as PairT, UncheckedFrom},
-		ecdsa,
+		crypto::{Pair as PairT, UncheckedFrom},
+		Hasher,
 	};
 
 	/// An identifier used to match public keys against BLS12-377 keys
@@ -56,7 +55,6 @@ pub mod ecdsa_bls377 {
 		ecdsa::SIGNATURE_SERIALIZED_SIZE + bls377::SIGNATURE_SERIALIZED_SIZE;
 
 	/// (ECDSA,BLS12-377) key-pair pair.
-	#[cfg(feature = "full_crypto")]
 	pub type Pair = super::Pair<ecdsa::Pair, bls377::Pair, PUBLIC_KEY_LEN, SIGNATURE_LEN>;
 	/// (ECDSA,BLS12-377) public key pair.
 	pub type Public = super::Public<PUBLIC_KEY_LEN>;
@@ -64,16 +62,13 @@ pub mod ecdsa_bls377 {
 	pub type Signature = super::Signature<SIGNATURE_LEN>;
 
 	impl super::CryptoType for Public {
-		#[cfg(feature = "full_crypto")]
 		type Pair = Pair;
 	}
 	impl super::CryptoType for Signature {
-		#[cfg(feature = "full_crypto")]
 		type Pair = Pair;
 	}
-	#[cfg(feature = "full_crypto")]
 	impl super::CryptoType for Pair {
 		type Pair = Pair;
 	}
@@ -136,7 +131,6 @@ pub mod ecdsa_bls377 {
 /// Secure seed length.
 ///
 /// Currently only supporting sub-schemes whose seed is a 32-bytes array.
-#[cfg(feature = "full_crypto")]
 const SECURE_SEED_LEN: usize = 32;
 
 /// A secret seed.
@@ -144,14 +138,12 @@ const SECURE_SEED_LEN: usize = 32;
 /// It's not called a "secret key" because ring doesn't expose the secret keys
 /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we
 /// will need it later (such as for HDKD).
-#[cfg(feature = "full_crypto")]
 type Seed = [u8; SECURE_SEED_LEN];
 
 /// A public key.
 #[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Eq, PartialOrd, Ord)]
 pub struct Public<const LEFT_PLUS_RIGHT_LEN: usize>([u8; LEFT_PLUS_RIGHT_LEN]);
 
-#[cfg(feature = "full_crypto")]
 impl<const LEFT_PLUS_RIGHT_LEN: usize> sp_std::hash::Hash for Public<LEFT_PLUS_RIGHT_LEN> {
 	fn hash<H: sp_std::hash::Hasher>(&self, state: &mut H) {
 		self.0.hash(state);
@@ -215,7 +207,6 @@ impl<const LEFT_PLUS_RIGHT_LEN: usize> PassBy for Public<LEFT_PLUS_RIGHT_LEN> {
 	type PassBy = pass_by::Inner<Self, [u8; LEFT_PLUS_RIGHT_LEN]>;
 }
 
-#[cfg(feature = "full_crypto")]
 impl<
 		LeftPair: PairT,
 		RightPair: PairT,
@@ -311,7 +302,6 @@ impl SignatureBound for T {}
 #[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Eq)]
 pub struct Signature<const LEFT_PLUS_RIGHT_LEN: usize>([u8; LEFT_PLUS_RIGHT_LEN]);
 
-#[cfg(feature = "full_crypto")]
 impl<const LEFT_PLUS_RIGHT_LEN: usize> sp_std::hash::Hash for Signature<LEFT_PLUS_RIGHT_LEN> {
 	fn hash<H: sp_std::hash::Hasher>(&self, state: &mut H) {
 		self.0.hash(state);
@@ -411,7 +401,6 @@ impl<const LEFT_PLUS_RIGHT_LEN: usize> UncheckedFrom<[u8; LEFT_PLUS_RIGHT_LEN]>
 }
 
 /// A key pair.
-#[cfg(feature = "full_crypto")]
 #[derive(Clone)]
 pub struct Pair<
 	LeftPair: PairT,
@@ -423,7 +412,6 @@ pub struct Pair<
 	left: LeftPair,
 	right: RightPair,
 }
 
-#[cfg(feature = "full_crypto")]
 impl<
 		LeftPair: PairT,
 		RightPair: PairT,
@@ -483,6 +471,7 @@ where
 		Self::Public::unchecked_from(raw)
 	}
 
+	#[cfg(feature = "full_crypto")]
 	fn sign(&self, message: &[u8]) -> Self::Signature {
 		let mut raw: [u8; SIGNATURE_LEN] = [0u8; SIGNATURE_LEN];
 		raw[..LeftPair::Signature::LEN].copy_from_slice(self.left.sign(message).as_ref());
diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs
index 7c02afc3cd5ff8fe378613d88c088c95ceb805c8..6c04eb8def1f4fdb2dd8ddc0773ab458b6ce1a8e 100644
--- a/substrate/primitives/core/src/sr25519.rs
+++ b/substrate/primitives/core/src/sr25519.rs
@@ -19,20 +19,14 @@
 //!
 //! Note: `CHAIN_CODE_LENGTH` must be equal to `crate::crypto::JUNCTION_ID_LEN`
 //! for this to work.
-#[cfg(any(feature = "full_crypto", feature = "serde"))]
-use crate::crypto::DeriveJunction;
 #[cfg(feature = "serde")]
 use crate::crypto::Ss58Codec;
+use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError};
 #[cfg(feature = "full_crypto")]
-use crate::crypto::{DeriveError, Pair as TraitPair, SecretStringError};
-#[cfg(feature = "full_crypto")]
-use schnorrkel::{
-	derive::CHAIN_CODE_LENGTH, signing_context, ExpansionMode, Keypair, MiniSecretKey, SecretKey,
-};
-#[cfg(any(feature = "full_crypto", feature = "serde"))]
+use schnorrkel::signing_context;
 use schnorrkel::{
-	derive::{ChainCode, Derivation},
-	PublicKey,
+	derive::{ChainCode, Derivation, CHAIN_CODE_LENGTH},
+	ExpansionMode, Keypair, MiniSecretKey, PublicKey, SecretKey,
 };
 
 use sp_std::vec::Vec;
@@ -47,7 +41,6 @@ use codec::{Decode, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_std::ops::Deref;
 
-#[cfg(feature = "full_crypto")]
 use schnorrkel::keys::{MINI_SECRET_KEY_LENGTH, SECRET_KEY_LENGTH};
 #[cfg(feature = "serde")]
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
@@ -56,14 +49,12 @@ use sp_runtime_interface::pass_by::PassByInner;
 use sp_std::alloc::{format, string::String};
 
 // signing context
-#[cfg(feature = "full_crypto")]
 const SIGNING_CTX: &[u8] = b"substrate";
 
 /// An identifier used to match public keys against sr25519 keys
 pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"sr25");
 
 /// An Schnorrkel/Ristretto x25519 ("sr25519") public key.
-#[cfg_attr(feature = "full_crypto", derive(Hash))]
 #[derive(
 	PartialEq,
 	Eq,
@@ -76,14 +67,13 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"sr25");
 	PassByInner,
 	MaxEncodedLen,
 	TypeInfo,
+	Hash,
 )]
 pub struct Public(pub [u8; 32]);
 
 /// An Schnorrkel/Ristretto x25519 ("sr25519") key pair.
-#[cfg(feature = "full_crypto")]
 pub struct Pair(Keypair);
 
-#[cfg(feature = "full_crypto")]
 impl Clone for Pair {
 	fn clone(&self) -> Self {
 		Pair(schnorrkel::Keypair {
@@ -216,8 +206,7 @@ impl<'de> Deserialize<'de> for Public {
 }
 
 /// An Schnorrkel/Ristretto x25519 ("sr25519") signature.
-#[cfg_attr(feature = "full_crypto", derive(Hash))]
-#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)]
+#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq, Hash)]
 pub struct Signature(pub [u8; 64]);
 
 impl TryFrom<&[u8]> for Signature {
@@ -435,16 +424,13 @@ impl AsRef<schnorrkel::Keypair> for Pair {
 }
 
 /// Derive a single hard junction.
-#[cfg(feature = "full_crypto")]
 fn derive_hard_junction(secret: &SecretKey, cc: &[u8; CHAIN_CODE_LENGTH]) -> MiniSecretKey {
 	secret.hard_derive_mini_secret_key(Some(ChainCode(*cc)), b"").0
 }
 
 /// The raw secret seed, which can be used to recreate the `Pair`.
-#[cfg(feature = "full_crypto")]
 type Seed = [u8; MINI_SECRET_KEY_LENGTH];
 
-#[cfg(feature = "full_crypto")]
 impl TraitPair for Pair {
 	type Public = Public;
 	type Seed = Seed;
@@ -499,6 +485,7 @@ impl TraitPair for Pair {
 		Ok((Self(result.into()), seed.map(|s| MiniSecretKey::to_bytes(&s))))
 	}
 
+	#[cfg(feature = "full_crypto")]
 	fn sign(&self, message: &[u8]) -> Signature {
 		let context = signing_context(SIGNING_CTX);
 		self.0.sign(context.bytes(message)).into()
@@ -533,16 +520,13 @@ impl Pair {
 }
 
 impl CryptoType for Public {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair;
 }
 
 impl CryptoType for Signature {
-	#[cfg(feature = "full_crypto")]
 	type Pair = Pair;
 }
 
-#[cfg(feature = "full_crypto")]
 impl CryptoType for Pair {
 	type Pair = Pair;
 }
diff --git a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml
index 2856c93c7eed162d9b6459bfe3371c2f8752189f..f244b02ca101670df10f5aea1a1482cb86cfdaed 100644
--- a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml
+++ b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml
@@ -19,6 +19,6 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 
 [dependencies]
-quote = "1.0.28"
-syn = { version = "2.0.49", features = ["full", "parsing"] }
+quote = { workspace = true }
+syn = { features = ["full", "parsing"], workspace = true }
 sp-crypto-hashing = { path = "..", default-features = false }
diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml
index aaf5e618189f68c31da64d5c829fc78a78d7c745..debf964aa3dfdf7cebd23e0f1d24e74b0d880ec0 100644
--- a/substrate/primitives/debug-derive/Cargo.toml
+++ b/substrate/primitives/debug-derive/Cargo.toml
@@ -19,8 +19,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 
 [dependencies]
-quote = "1.0.28"
-syn = "2.0.49"
+quote = { workspace = true }
+syn = { workspace = true }
 proc-macro2 = "1.0.56"
 
 [features]
diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml
index 6497465a88128a769a22d0cde313ea30edd3e591..15440b4811ec4664725f8b0df5335748917b20fc 100644
--- a/substrate/primitives/genesis-builder/Cargo.toml
+++ b/substrate/primitives/genesis-builder/Cargo.toml
@@ -6,7 +6,7 @@ edition.workspace = true
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository.workspace = true
-description = "Substrate GenesisConfig builder API"
+description = "Substrate RuntimeGenesisConfig builder API"
 readme = "README.md"
 
 [lints]
 workspace = true
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 sp-api = { path = "../api", default-features = false }
 sp-runtime = { path = "../runtime", default-features = false }
 sp-std = { path = "../std", default-features = false }
-serde_json = { version = "1.0.113", default-features = false, features = ["alloc", "arbitrary_precision"] }
+serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/primitives/genesis-builder/src/lib.rs b/substrate/primitives/genesis-builder/src/lib.rs
index e002cd3aa6f704d8889fd35d04fdd65b46a1b754..bb1a2a352488ac0c03cfcd35dac1b45444b1d9a1 100644
--- a/substrate/primitives/genesis-builder/src/lib.rs
+++ b/substrate/primitives/genesis-builder/src/lib.rs
@@ -19,36 +19,37 @@
 
 //! Substrate genesis config builder
 //!
-//! This Runtime API allows to construct `GenesisConfig`, in particular:
-//! - serialize the runtime default `GenesisConfig` struct into json format,
-//! - put the GenesisConfig struct into the storage. Internally this operation calls
+//! This Runtime API allows constructing the `RuntimeGenesisConfig`, in particular:
+//! - serializing the runtime default `RuntimeGenesisConfig` struct into json format,
+//! - putting the `RuntimeGenesisConfig` struct into the storage. Internally this operation calls
 //!   `GenesisBuild::build` function for all runtime pallets, which is typically provided by
 //!   pallet's author.
-//! - deserialize the `GenesisConfig` from given json blob and put `GenesisConfig` into the state
-//!   storage. Allows to build customized configuration.
+//! - deserializing the `RuntimeGenesisConfig` from a given json blob and putting the
+//!   `RuntimeGenesisConfig` into the state storage. This allows building a customized
+//!   configuration.
 //!
-//! Providing externalities with empty storage and putting `GenesisConfig` into storage allows to
-//! catch and build the raw storage of `GenesisConfig` which is the foundation for genesis block.
+//! Providing externalities with empty storage and putting the `RuntimeGenesisConfig` into storage
+//! allows catching and building the raw storage of the `RuntimeGenesisConfig`, which is the
+//! foundation for the genesis block.
 
 /// The result type alias, used in build methods. `Err` contains formatted error message.
 pub type Result = core::result::Result<(), sp_runtime::RuntimeString>;
 
 sp_api::decl_runtime_apis! {
-	/// API to interact with GenesisConfig for the runtime
+	/// API to interact with the RuntimeGenesisConfig for the runtime
 	pub trait GenesisBuilder {
-		/// Creates the default `GenesisConfig` and returns it as a JSON blob.
+		/// Creates the default `RuntimeGenesisConfig` and returns it as a JSON blob.
 		///
-		/// This function instantiates the default `GenesisConfig` struct for the runtime and serializes it into a JSON
-		/// blob. It returns a `Vec<u8>` containing the JSON representation of the default `GenesisConfig`.
+		/// This function instantiates the default `RuntimeGenesisConfig` struct for the runtime and serializes it into a JSON
+		/// blob. It returns a `Vec<u8>` containing the JSON representation of the default `RuntimeGenesisConfig`.
 		fn create_default_config() -> sp_std::vec::Vec<u8>;
 
-		/// Build `GenesisConfig` from a JSON blob not using any defaults and store it in the storage.
+		/// Build `RuntimeGenesisConfig` from a JSON blob not using any defaults and store it in the storage.
 		///
-		/// This function deserializes the full `GenesisConfig` from the given JSON blob and puts it into the storage.
+		/// This function deserializes the full `RuntimeGenesisConfig` from the given JSON blob and puts it into the storage.
 		/// If the provided JSON blob is incorrect or incomplete or the deserialization fails, an error is returned.
 		/// It is recommended to log any errors encountered during the process.
 		///
-		/// Please note that provided json blob must contain all `GenesisConfig` fields, no defaults will be used.
+		/// Please note that the provided JSON blob must contain all `RuntimeGenesisConfig` fields; no defaults will be used.
 		fn build_config(json: sp_std::vec::Vec<u8>) -> Result;
 	}
 }
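A runtime typically wires this API to its `RuntimeGenesisConfig` through the `frame_support::genesis_builder_helper` functions. A representative sketch only: this is an excerpt that belongs inside an `impl_runtime_apis!` block, and `Runtime`, `Block` and `RuntimeGenesisConfig` are the usual runtime aliases, assumed here:

```rust
// Sketch: inside a runtime's `impl_runtime_apis!` block.
use frame_support::genesis_builder_helper::{build_config, create_default_config};

impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
	fn create_default_config() -> sp_std::vec::Vec<u8> {
		// Serializes the runtime's default `RuntimeGenesisConfig` to JSON bytes.
		create_default_config::<RuntimeGenesisConfig>()
	}

	fn build_config(config: sp_std::vec::Vec<u8>) -> sp_genesis_builder::Result {
		// Deserializes the full JSON blob and writes the genesis state to storage.
		build_config::<RuntimeGenesisConfig>(config)
	}
}
```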
diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml
index 2a9d205e16642d3565a941f0a2969fadc43895cb..bfb1d7733471fa7fd9f0a13c14d337e34188065d 100644
--- a/substrate/primitives/inherents/Cargo.toml
+++ b/substrate/primitives/inherents/Cargo.toml
@@ -21,7 +21,7 @@ async-trait = { version = "0.1.74", optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
 impl-trait-for-tuples = "0.2.2"
-thiserror = { version = "1.0.48", optional = true }
+thiserror = { optional = true, workspace = true }
 sp-runtime = { path = "../runtime", default-features = false, optional = true }
 sp-std = { path = "../std", default-features = false }
diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml
index c78def9bf4429eab1e5c9822a52732ee3a3c74ca..e47775d56d9e55780c6e18c2d33e132407c83786 100644
--- a/substrate/primitives/io/Cargo.toml
+++ b/substrate/primitives/io/Cargo.toml
@@ -38,6 +38,9 @@ tracing-core = { version = "0.1.32", default-features = false }
 # Required for backwards compatibility reason, but only used for verifying when `UseDalekExt` is set.
 ed25519-dalek = { version = "2.1", default-features = false, optional = true }
 
+[target.'cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), substrate_runtime))'.dependencies]
+polkavm-derive = { workspace = true }
+
 [build-dependencies]
 rustversion = "1.0.6"
diff --git a/substrate/primitives/io/src/global_alloc_riscv.rs b/substrate/primitives/io/src/global_alloc_riscv.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8d7c69c20945946a352238dc8a94ecaba2f2a266
--- /dev/null
+++ b/substrate/primitives/io/src/global_alloc_riscv.rs
@@ -0,0 +1,19 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[global_allocator]
+static ALLOCATOR: polkavm_derive::LeakingAllocator = polkavm_derive::LeakingAllocator;
diff --git a/substrate/primitives/io/src/global_alloc_wasm.rs b/substrate/primitives/io/src/global_alloc_wasm.rs
new file mode 100644
index 0000000000000000000000000000000000000000..cf19a6e21a2d26f20d7989a9b155709b2afa73ac
--- /dev/null
+++ b/substrate/primitives/io/src/global_alloc_wasm.rs
@@ -0,0 +1,34 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use core::alloc::{GlobalAlloc, Layout};
+
+/// Allocator used by Substrate from within the runtime.
+struct RuntimeAllocator;
+
+#[global_allocator]
+static ALLOCATOR: RuntimeAllocator = RuntimeAllocator;
+
+unsafe impl GlobalAlloc for RuntimeAllocator {
+	unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+		crate::allocator::malloc(layout.size() as u32)
+	}
+
+	unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) {
+		crate::allocator::free(ptr)
+	}
+}
diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs
index 6d34199416a36deeb011003989f6e361d535a0e6..684854ea5c8d4568c60b8ede7b89cdf4c26a4c38 100644
--- a/substrate/primitives/io/src/lib.rs
+++ b/substrate/primitives/io/src/lib.rs
@@ -129,6 +129,16 @@ use sp_externalities::{Externalities, ExternalitiesExt};
 
 pub use sp_externalities::MultiRemovalResults;
 
+#[cfg(all(not(feature = "disable_allocator"), substrate_runtime, target_family = "wasm"))]
+mod global_alloc_wasm;
+
+#[cfg(all(
+	not(feature = "disable_allocator"),
+	substrate_runtime,
+	any(target_arch = "riscv32", target_arch = "riscv64")
+))]
+mod global_alloc_riscv;
+
 #[cfg(feature = "std")]
 const LOG_TARGET: &str = "runtime::io";
 
@@ -1737,30 +1747,6 @@ mod tracing_setup {
 
 pub use tracing_setup::init_tracing;
 
-/// Allocator used by Substrate from within the runtime.
-#[cfg(substrate_runtime)]
-struct RuntimeAllocator;
-
-#[cfg(all(not(feature = "disable_allocator"), substrate_runtime))]
-#[global_allocator]
-static ALLOCATOR: RuntimeAllocator = RuntimeAllocator;
-
-#[cfg(substrate_runtime)]
-mod allocator_impl {
-	use super::*;
-	use core::alloc::{GlobalAlloc, Layout};
-
-	unsafe impl GlobalAlloc for RuntimeAllocator {
-		unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-			allocator::malloc(layout.size() as u32)
-		}
-
-		unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) {
-			allocator::free(ptr)
-		}
-	}
-}
-
 /// Crashes the execution of the program.
 ///
 /// Equivalent to the WASM `unreachable` instruction, RISC-V `unimp` instruction,
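The allocator now lives in per-target modules: the wasm variant forwards to the host allocator, while the RISC-V/PolkaVM variant uses `polkavm_derive::LeakingAllocator`. For runtimes that opt out via the `disable_allocator` feature, the contract is simply to register some other `#[global_allocator]`. A hypothetical sketch, assuming the host-side `sp_io::allocator` functions are reachable as in the default implementation above:

```rust
// Hypothetical wasm-runtime sketch: with sp-io's `disable_allocator` feature
// enabled, the runtime must supply its own allocator, e.g. by delegating to
// the host allocator exactly as the default implementation does.
use core::alloc::{GlobalAlloc, Layout};

struct CustomAllocator;

unsafe impl GlobalAlloc for CustomAllocator {
	unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
		sp_io::allocator::malloc(layout.size() as u32)
	}

	unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
		sp_io::allocator::free(ptr)
	}
}

#[global_allocator]
static ALLOCATOR: CustomAllocator = CustomAllocator;
```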
diff --git a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml
index 1c936b6685be86c54a70824ab902f616c0639945..940fe90916d25aabef032437f0a6e0a05d270464 100644
--- a/substrate/primitives/keyring/Cargo.toml
+++ b/substrate/primitives/keyring/Cargo.toml
@@ -18,10 +18,13 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 strum = { version = "0.24.1", features = ["derive"], default-features = false }
-sp-core = { path = "../core" }
-sp-runtime = { path = "../runtime" }
+sp-core = { path = "../core", default-features = false }
+sp-runtime = { path = "../runtime", default-features = false }
 
 [features]
+default = ["std"]
+std = ["sp-core/std", "sp-runtime/std", "strum/std"]
+
 # This feature adds Bandersnatch crypto primitives.
 # It should not be used in production since the implementation and interface may still
 # be subject to significant changes.
diff --git a/substrate/primitives/keyring/check-features-variants.sh b/substrate/primitives/keyring/check-features-variants.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9c28d83589465483a290b6485f9c77fa7dbdb5a8
--- /dev/null
+++ b/substrate/primitives/keyring/check-features-variants.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env -S bash -eux
+
+export RUSTFLAGS="-Cdebug-assertions=y -Dwarnings"
+T=wasm32-unknown-unknown
+
+cargo check --release
+cargo check --release --features="bandersnatch-experimental"
+cargo check --release --target=$T --no-default-features
diff --git a/substrate/primitives/keyring/src/bandersnatch.rs b/substrate/primitives/keyring/src/bandersnatch.rs
index eb60f85632725ca9efbac46c63222fc71609d74f..67fc5c47df643ee4b45b8ebbb99089abdb466d96 100644
--- a/substrate/primitives/keyring/src/bandersnatch.rs
+++ b/substrate/primitives/keyring/src/bandersnatch.rs
@@ -18,14 +18,21 @@
 //! A set of well-known keys used for testing.
 
 pub use sp_core::bandersnatch;
+#[cfg(feature = "std")]
+use sp_core::bandersnatch::Signature;
 use sp_core::{
-	bandersnatch::{Pair, Public, Signature},
+	bandersnatch::{Pair, Public},
 	crypto::UncheckedFrom,
 	hex2array, ByteArray, Pair as PairT,
 };
 
+extern crate alloc;
+use alloc::{fmt, format, str::FromStr, string::String, vec::Vec};
+
 /// Set of test accounts.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)]
+#[derive(
+	Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter, Ord, PartialOrd,
+)]
 pub enum Keyring {
 	Alice,
 	Bob,
@@ -56,6 +63,7 @@ impl Keyring {
 		Public::from(self).to_raw_vec()
 	}
 
+	#[cfg(feature = "std")]
 	pub fn sign(self, msg: &[u8]) -> Signature {
 		Pair::from(self).sign(msg)
 	}
@@ -102,16 +110,16 @@ impl From<Keyring> for &'static str {
 
 #[derive(Debug)]
 pub struct ParseKeyringError;
 
-impl std::fmt::Display for ParseKeyringError {
-	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Display for ParseKeyringError {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 		write!(f, "ParseKeyringError")
 	}
 }
 
-impl std::str::FromStr for Keyring {
+impl FromStr for Keyring {
 	type Err = ParseKeyringError;
 
-	fn from_str(s: &str) -> Result<Self, <Self as std::str::FromStr>::Err> {
+	fn from_str(s: &str) -> Result<Self, <Self as FromStr>::Err> {
 		match s {
 			"Alice" => Ok(Keyring::Alice),
 			"Bob" => Ok(Keyring::Bob),
diff --git a/substrate/primitives/keyring/src/ed25519.rs b/substrate/primitives/keyring/src/ed25519.rs
index ade42b294940213664d0553a4e4ec537ce553f15..98ca368e53caac26ae690b76500475b1749a07b1 100644
--- a/substrate/primitives/keyring/src/ed25519.rs
+++ b/substrate/primitives/keyring/src/ed25519.rs
@@ -18,14 +18,21 @@
 //! Support code for the runtime. A set of test accounts.
 
 pub use sp_core::ed25519;
+#[cfg(feature = "std")]
+use sp_core::ed25519::Signature;
 use sp_core::{
-	ed25519::{Pair, Public, Signature},
+	ed25519::{Pair, Public},
 	hex2array, ByteArray, Pair as PairT, H256,
 };
 use sp_runtime::AccountId32;
 
+extern crate alloc;
+use alloc::{format, string::String, vec::Vec};
+
 /// Set of test accounts.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)]
+#[derive(
+	Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter, Ord, PartialOrd,
+)]
 pub enum Keyring {
 	Alice,
 	Bob,
@@ -76,6 +83,7 @@ impl Keyring {
 		self.to_raw_public().into()
 	}
 
+	#[cfg(feature = "std")]
 	pub fn sign(self, msg: &[u8]) -> Signature {
 		Pair::from(self).sign(msg)
 	}
diff --git a/substrate/primitives/keyring/src/lib.rs b/substrate/primitives/keyring/src/lib.rs
index ee7fd56ba11bf5855d8ae648f073ea004bb257aa..f753bf4b0dd684d0aa292eeca1f97ad9f5cafdda 100644
--- a/substrate/primitives/keyring/src/lib.rs
+++ b/substrate/primitives/keyring/src/lib.rs
@@ -17,6 +17,8 @@
 
 //! Support code for the runtime. A set of test accounts.
 
+#![cfg_attr(not(feature = "std"), no_std)]
+
 /// Test account crypto for sr25519.
 pub mod sr25519;
diff --git a/substrate/primitives/keyring/src/sr25519.rs b/substrate/primitives/keyring/src/sr25519.rs
index 1c2a2526efb1eccb16cb0696f89cb3fc7583357b..a3a506152d7d6f0b76b366a930860e39f3f8fae0 100644
--- a/substrate/primitives/keyring/src/sr25519.rs
+++ b/substrate/primitives/keyring/src/sr25519.rs
@@ -18,15 +18,22 @@
 //! Support code for the runtime. A set of test accounts.
 
 pub use sp_core::sr25519;
+#[cfg(feature = "std")]
+use sp_core::sr25519::Signature;
 use sp_core::{
 	hex2array,
-	sr25519::{Pair, Public, Signature},
+	sr25519::{Pair, Public},
 	ByteArray, Pair as PairT, H256,
 };
 use sp_runtime::AccountId32;
 
+extern crate alloc;
+use alloc::{fmt, format, str::FromStr, string::String, vec::Vec};
+
 /// Set of test accounts.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)]
+#[derive(
+	Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter, Ord, PartialOrd,
+)]
 pub enum Keyring {
 	Alice,
 	Bob,
@@ -77,6 +84,7 @@ impl Keyring {
 		self.to_raw_public().into()
 	}
 
+	#[cfg(feature = "std")]
 	pub fn sign(self, msg: &[u8]) -> Signature {
 		Pair::from(self).sign(msg)
 	}
@@ -140,16 +148,16 @@ impl From<Keyring> for sp_runtime::MultiSigner {
 
 #[derive(Debug)]
 pub struct ParseKeyringError;
 
-impl std::fmt::Display for ParseKeyringError {
-	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Display for ParseKeyringError {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 		write!(f, "ParseKeyringError")
 	}
 }
 
-impl std::str::FromStr for Keyring {
+impl FromStr for Keyring {
 	type Err = ParseKeyringError;
 
-	fn from_str(s: &str) -> Result<Self, <Self as std::str::FromStr>::Err> {
+	fn from_str(s: &str) -> Result<Self, <Self as FromStr>::Err> {
 		match s {
 			"alice" => Ok(Keyring::Alice),
 			"bob" => Ok(Keyring::Bob),
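Since signing needs the secret key material, only `sign` moves behind `std`; the account accessors keep working in `no_std` builds. Typical test usage, sketched with the long-standing `sp_keyring` exports (the exact re-export names are assumed from context):

```rust
// Sketch: keyring accessors stay available everywhere; `sign` is std-only now.
use sp_keyring::AccountKeyring;
use std::str::FromStr;

fn main() {
	let alice = AccountKeyring::Alice;
	// Stable, well-known account id for tests.
	println!("Alice: {}", alice.to_account_id());
	// Lowercase names parse for the sr25519 keyring, per the match arms above.
	assert_eq!(AccountKeyring::from_str("alice").unwrap(), alice);
	let _sig = alice.sign(b"msg"); // requires the `std` feature
}
```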
version = "2.10.0", default-features = false, features = ["derive"] } log = { workspace = true } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } -serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false, optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-runtime = { path = "../runtime", default-features = false } sp-std = { path = "../std", default-features = false } -thiserror = { version = "1.0", optional = true } +thiserror = { optional = true, workspace = true } [dev-dependencies] array-bytes = "6.1" diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index 8c3aee1d086c2082f56bad49274874247fc83fe8..7373aa849cb8d8f47fe3cffefd5302eee28a7dfb 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-arithmetic = { path = "../arithmetic", default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/rpc/Cargo.toml b/substrate/primitives/rpc/Cargo.toml index 04bbe3eea53f8d2554554f70a115f1b04fc2db7e..dce0eeee9f99bd9cd544e584e07cd3c29ca20ae1 100644 --- a/substrate/primitives/rpc/Cargo.toml +++ b/substrate/primitives/rpc/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] rustc-hash = "1.1.0" -serde = { version = "1.0.196", features = ["derive"] } +serde = { features = ["derive"], workspace = true, default-features = true } sp-core = { path = "../core" } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 8031ba75b9b9ad0d876561372626ca3196fcbe19..7dbd810fea932fd4bf284f80c1de976be7009b6d 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -22,6 +22,6 @@ proc-macro = true Inflector = "0.11.4" proc-macro-crate = "3.0.0" proc-macro2 = "1.0.56" -quote = "1.0.28" +quote = { workspace = true } expander = "2.0.0" -syn = { version = "2.0.49", features = ["extra-traits", "fold", "full", "visit"] } +syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 8e3b63a55de64796c78a2a3a0680071399e7e66a..cacfc059722942f2fe70d951f8016449914c2e5b 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -25,7 +25,7 @@ log = { workspace = true } paste = "1.0" rand = { version = "0.8.5", optional = true } scale-info = { version = "2.10.0", default-features 
= false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-arithmetic = { path = "../arithmetic", default-features = false } sp-core = { path = "../core", default-features = false } @@ -38,7 +38,7 @@ simple-mermaid = { version = "0.1.1", optional = true } [dev-dependencies] rand = "0.8.5" -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } zstd = { version = "0.12.4", default-features = false } sp-api = { path = "../api" } sp-state-machine = { path = "../state-machine" } diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index ddf92554c83056f192015a26285408b7fdf33695..44bf3c969e5441245d1547e779195cdb68a83599 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -998,6 +998,16 @@ impl<R> TransactionOutcome<R> { } } +/// Confines the kind of extrinsics that can be included in a block. +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Encode, Decode, TypeInfo)] +pub enum ExtrinsicInclusionMode { + /// All extrinsics are allowed to be included in this block. + #[default] + AllExtrinsics, + /// Only inherents are allowed to be included. + OnlyInherents, +} + #[cfg(test)] mod tests { use crate::traits::BlakeTwo256; diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 9429432babb5c88c827cbfc095efbbe869ea89db..21346fbaca5383554d1285ceeaba43815b81857a 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index 489209ecc33732631b9848beba7a9a81047a6ad5..09994f1ae91e1c9a7dba2faf169870f08a640b22 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -23,7 +23,7 @@ log = { workspace = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8.5", optional = true } smallvec = "1.11.0" -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } sp-core = { path = "../core", default-features = false } sp-externalities = { path = "../externalities", default-features = false } diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index 14af57a7b419b5a5db29065106a8853a6a9bf493..652ab3ef13aa695f13c9b9c0a525b3f90dff8f0c 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -26,7 +26,7 @@ sp-api = { path = "../api", default-features = false } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-runtime-interface = { path =
"../runtime-interface", default-features = false } sp-externalities = { path = "../externalities", default-features = false } -thiserror = { version = "1.0", optional = true } +thiserror = { optional = true, workspace = true } # ECIES dependencies ed25519-dalek = { version = "2.1", optional = true } diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index e15a3296d5214962f7c1404edca53c2ff5ed2943..d3ade87ea47f848e8036ce0a4d374fa4d77184b2 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true, default-features = false } ref-cast = "1.0.0" -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index 0411326c28b858d830b67618f072782c1996e33e..f310216dd58d737be7cde512d3fbdf27f4fbda25 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 914c1e7865e972eb5f8acdba7d4cc9103ceb18f6..9e2b802bfb15e2e89169923d2e6f4274d2cdf1f2 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index f871f462d363375ac85cbceb2c1687b0a5daacc6..16d3ca19a179ce97bbc2c5c42023ceb54bf8b32b 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -30,7 +30,7 @@ nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, 
workspace = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.28.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 21f957763757a631bcb8a8208b0d4bb4a8ab7a4b..a94e2322430b421d24b438b2899b67feec9dd059 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -21,8 +21,8 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } -thiserror = { version = "1.0.48", optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } +thiserror = { optional = true, workspace = true } sp-crypto-hashing-proc-macro = { path = "../crypto/hashing/proc-macro" } sp-runtime = { path = "../runtime", default-features = false } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index b99894f8ab5bd7f6fcbc1186ee290a9183a48804..f7abf88c9a67b7e3102d44254b520a9bbeb7a8a8 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -21,8 +21,8 @@ proc-macro = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["extra-traits", "fold", "full", "visit"] } +quote = { workspace = true } +syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } [dev-dependencies] sp-version = { path = ".." } diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 87e0c6a7299dd56181924dbcfefa600c5c1ebe50..a7d61de001b5525af1a43148a6c7f6f284c4e51f 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] bounded-collections = { version = "0.2.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, optional = true, features = ["alloc", "derive"] } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = "1.11.0" sp-arithmetic = { path = "../arithmetic", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } diff --git a/substrate/primitives/weights/src/lib.rs b/substrate/primitives/weights/src/lib.rs index aede9473535a361325f64c177dd9f1861664ca40..b2c956266e2c3a346ca32c7b74ceb7f4c5cc2c0c 100644 --- a/substrate/primitives/weights/src/lib.rs +++ b/substrate/primitives/weights/src/lib.rs @@ -18,9 +18,6 @@ //! # Primitives for transaction weighting. #![cfg_attr(not(feature = "std"), no_std)] -// TODO remove once `OldWeight` is gone. I dont know why this is needed, maybe by one of the macros -// of `OldWeight`. 
-#![allow(deprecated)] extern crate self as sp_weights; @@ -28,7 +25,7 @@ mod weight_meter; mod weight_v2; use bounded_collections::Get; -use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -52,28 +49,6 @@ pub mod constants { pub const WEIGHT_PROOF_SIZE_PER_KB: u64 = 1024; } -/// The old weight type. -/// -/// NOTE: This type exists purely for compatibility purposes! Use [`weight_v2::Weight`] in all other -/// cases. -#[derive( - Decode, - Encode, - CompactAs, - PartialEq, - Eq, - Clone, - Copy, - RuntimeDebug, - Default, - MaxEncodedLen, - TypeInfo, -)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(transparent))] -#[deprecated(note = "Will be removed soon; use `Weight` instead.")] -pub struct OldWeight(pub u64); - /// The weight of database operations that the runtime can invoke. /// /// NOTE: This is currently only measured in computational time, and will probably diff --git a/substrate/primitives/weights/src/weight_meter.rs b/substrate/primitives/weights/src/weight_meter.rs index 584d22304c3ae33ece669bc69927a2568fb9b288..cfe8396ae6d67ed36d038f6afc2a7700c56b815a 100644 --- a/substrate/primitives/weights/src/weight_meter.rs +++ b/substrate/primitives/weights/src/weight_meter.rs @@ -118,13 +118,6 @@ impl WeightMeter { debug_assert!(self.consumed.all_lte(self.limit), "Weight counter overflow"); } - /// Consume the given weight after checking that it can be consumed and return `true`. Otherwise - /// do nothing and return `false`. - #[deprecated(note = "Use `try_consume` instead. Will be removed after December 2023.")] - pub fn check_accrue(&mut self, w: Weight) -> bool { - self.try_consume(w).is_ok() - } - /// Consume the given weight after checking that it can be consumed. /// /// Returns `Ok` if the weight can be consumed or otherwise an `Err`. @@ -139,16 +132,15 @@ impl WeightMeter { }) } - /// Check if the given weight can be consumed. - #[deprecated(note = "Use `can_consume` instead. Will be removed after December 2023.")] - pub fn can_accrue(&self, w: Weight) -> bool { - self.can_consume(w) - } - /// Check if the given weight can be consumed. pub fn can_consume(&self, w: Weight) -> bool { self.consumed.checked_add(&w).map_or(false, |t| t.all_lte(self.limit)) } + + /// Reclaim the given weight. 
+ pub fn reclaim_proof_size(&mut self, s: u64) { + self.consumed.saturating_reduce(Weight::from_parts(0, s)); + } } #[cfg(test)] @@ -160,80 +152,80 @@ mod tests { fn weight_meter_remaining_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(10, 20)); - assert!(meter.check_accrue(Weight::from_parts(5, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(5, 0)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(5, 0)); assert_eq!(meter.remaining(), Weight::from_parts(5, 20)); - assert!(meter.check_accrue(Weight::from_parts(2, 10))); + assert_eq!(meter.try_consume(Weight::from_parts(2, 10)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(7, 10)); assert_eq!(meter.remaining(), Weight::from_parts(3, 10)); - assert!(meter.check_accrue(Weight::from_parts(3, 10))); + assert_eq!(meter.try_consume(Weight::from_parts(3, 10)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(10, 20)); assert_eq!(meter.remaining(), Weight::from_parts(0, 0)); } #[test] - fn weight_meter_can_accrue_works() { + fn weight_meter_can_consume_works() { let meter = WeightMeter::with_limit(Weight::from_parts(1, 1)); - assert!(meter.can_accrue(Weight::from_parts(0, 0))); - assert!(meter.can_accrue(Weight::from_parts(1, 1))); - assert!(!meter.can_accrue(Weight::from_parts(0, 2))); - assert!(!meter.can_accrue(Weight::from_parts(2, 0))); - assert!(!meter.can_accrue(Weight::from_parts(2, 2))); + assert!(meter.can_consume(Weight::from_parts(0, 0))); + assert!(meter.can_consume(Weight::from_parts(1, 1))); + assert!(!meter.can_consume(Weight::from_parts(0, 2))); + assert!(!meter.can_consume(Weight::from_parts(2, 0))); + assert!(!meter.can_consume(Weight::from_parts(2, 2))); } #[test] - fn weight_meter_check_accrue_works() { + fn weight_meter_try_consume_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(2, 2)); - assert!(meter.check_accrue(Weight::from_parts(0, 0))); - assert!(meter.check_accrue(Weight::from_parts(1, 1))); - assert!(!meter.check_accrue(Weight::from_parts(0, 2))); - assert!(!meter.check_accrue(Weight::from_parts(2, 0))); - assert!(!meter.check_accrue(Weight::from_parts(2, 2))); - assert!(meter.check_accrue(Weight::from_parts(0, 1))); - assert!(meter.check_accrue(Weight::from_parts(1, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 0)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(1, 1)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(0, 2)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(2, 0)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(2, 2)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(0, 1)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(1, 0)), Ok(())); } #[test] - fn weight_meter_check_and_can_accrue_works() { + fn weight_meter_check_and_can_consume_works() { let mut meter = WeightMeter::new(); - assert!(meter.can_accrue(Weight::from_parts(u64::MAX, 0))); - assert!(meter.check_accrue(Weight::from_parts(u64::MAX, 0))); + assert!(meter.can_consume(Weight::from_parts(u64::MAX, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(u64::MAX, 0)), Ok(())); - assert!(meter.can_accrue(Weight::from_parts(0, u64::MAX))); - assert!(meter.check_accrue(Weight::from_parts(0, u64::MAX))); + assert!(meter.can_consume(Weight::from_parts(0, u64::MAX))); + assert_eq!(meter.try_consume(Weight::from_parts(0, u64::MAX)), Ok(())); - assert!(!meter.can_accrue(Weight::from_parts(0, 1))); - assert!(!meter.check_accrue(Weight::from_parts(0, 1))); + 
assert!(!meter.can_consume(Weight::from_parts(0, 1))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 1)), Err(())); - assert!(!meter.can_accrue(Weight::from_parts(1, 0))); - assert!(!meter.check_accrue(Weight::from_parts(1, 0))); + assert!(!meter.can_consume(Weight::from_parts(1, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(1, 0)), Err(())); - assert!(meter.can_accrue(Weight::zero())); - assert!(meter.check_accrue(Weight::zero())); + assert!(meter.can_consume(Weight::zero())); + assert_eq!(meter.try_consume(Weight::zero()), Ok(())); } #[test] fn consumed_ratio_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(10, 20)); - assert!(meter.check_accrue(Weight::from_parts(5, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(5, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(50)); - assert!(meter.check_accrue(Weight::from_parts(0, 12))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 12)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(60)); - assert!(meter.check_accrue(Weight::from_parts(2, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(2, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(70)); - assert!(meter.check_accrue(Weight::from_parts(0, 4))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 4)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(80)); - assert!(meter.check_accrue(Weight::from_parts(3, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(3, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); - assert!(meter.check_accrue(Weight::from_parts(0, 4))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 4)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); } @@ -277,6 +269,21 @@ mod tests { assert_eq!(meter.consumed(), Weight::from_parts(5, 10)); } + #[test] + #[cfg(debug_assertions)] + fn reclaim_works() { + let mut meter = WeightMeter::with_limit(Weight::from_parts(5, 10)); + + meter.consume(Weight::from_parts(5, 10)); + assert_eq!(meter.consumed(), Weight::from_parts(5, 10)); + + meter.reclaim_proof_size(3); + assert_eq!(meter.consumed(), Weight::from_parts(5, 7)); + + meter.reclaim_proof_size(10); + assert_eq!(meter.consumed(), Weight::from_parts(5, 0)); + } + #[test] #[cfg(debug_assertions)] #[should_panic(expected = "Weight counter overflow")] diff --git a/substrate/scripts/run_all_benchmarks.sh b/substrate/scripts/run_all_benchmarks.sh index 83848100a7e5189f312411fb42eae7d4eb6a43c4..6dd7cede319f974a074d951d34ec3c970f02c32f 100755 --- a/substrate/scripts/run_all_benchmarks.sh +++ b/substrate/scripts/run_all_benchmarks.sh @@ -59,11 +59,12 @@ done if [ "$skip_build" != true ] then echo "[+] Compiling Substrate benchmarks..." - cargo build --profile=production --locked --features=runtime-benchmarks --bin substrate + cargo build --profile=production --locked --features=runtime-benchmarks --bin substrate-node fi # The executable to use. -SUBSTRATE=./target/production/substrate +# Parent directory because of the monorepo structure. +SUBSTRATE=../target/production/substrate-node # Manually exclude some pallets. EXCLUDED_PALLETS=( @@ -80,11 +81,7 @@ EXCLUDED_PALLETS=( # Load all pallet names in an array. ALL_PALLETS=($( - $SUBSTRATE benchmark pallet --list --chain=dev |\ - tail -n+2 |\ - cut -d',' -f1 |\ - sort |\ - uniq + $SUBSTRATE benchmark pallet --list=pallets --no-csv-header --chain=dev )) # Filter out the excluded pallets by concatenating the arrays and discarding duplicates. 
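Taken together, the `WeightMeter` changes earlier in this patch replace the `bool`-returning `check_accrue`/`can_accrue` pair with `try_consume`/`can_consume` and add proof-size reclaiming. Below is a minimal sketch of the resulting API, using only calls that appear in the updated tests (`with_limit`, `consumed`, and the new `reclaim_proof_size`) and assuming the usual re-export of `WeightMeter` at the crate root:

```rust
use sp_weights::{Weight, WeightMeter};

fn sketch() {
    // A limit of 10 ref-time and 20 proof-size, as in the tests above.
    let mut meter = WeightMeter::with_limit(Weight::from_parts(10, 20));

    // `can_consume` replaces the removed `can_accrue`: a non-consuming check.
    assert!(meter.can_consume(Weight::from_parts(5, 10)));

    // `try_consume` replaces the removed `check_accrue` and returns
    // `Result<(), ()>` instead of `bool`, so exhaustion can be propagated.
    assert_eq!(meter.try_consume(Weight::from_parts(5, 10)), Ok(()));
    assert_eq!(meter.try_consume(Weight::from_parts(20, 0)), Err(()));

    // New in this patch: hand back proof size that turned out to be unused
    // (e.g. after PoV reclaim); saturates at zero.
    meter.reclaim_proof_size(3);
    assert_eq!(meter.consumed(), Weight::from_parts(5, 7));
}
```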
diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 4ec2857b1096f6784bdbbbfab09a0dc7ba9b4e85..349b04d32d7baff5c24323e171ebe373730f464f 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -20,8 +20,8 @@ array-bytes = "6.1" async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" -serde = "1.0.196" -serde_json = "1.0.113" +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sc-client-api = { path = "../../client/api" } sc-client-db = { path = "../../client/db", default-features = false, features = [ "test-helpers", diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index e3f06e27563581b100f489072d939b02d99a3247..d283b24f286aed743ba613f095f2cd319caba263 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -72,6 +72,7 @@ pub struct TestClientBuilder<Block: BlockT, ExecutorDispatch, Backend, G: GenesisInit> bad_blocks: BadBlocks<Block>, enable_offchain_indexing_api: bool, + enable_import_proof_recording: bool, no_genesis: bool, } @@ -120,6 +121,7 @@ impl<Block: BlockT, ExecutorDispatch, Backend, G: GenesisInit> bad_blocks: None, enable_offchain_indexing_api: false, no_genesis: false, + enable_import_proof_recording: false, } } @@ -165,6 +167,12 @@ impl<Block: BlockT, ExecutorDispatch, Backend, G: GenesisInit> self } + /// Enable proof recording on import. + pub fn enable_import_proof_recording(mut self) -> Self { + self.enable_import_proof_recording = true; + self + } + /// Disable writing genesis. pub fn set_no_genesis(mut self) -> Self { self.no_genesis = true; @@ -202,6 +210,7 @@ impl<Block: BlockT, ExecutorDispatch, Backend, G: GenesisInit> }; let client_config = ClientConfig { + enable_import_proof_recording: self.enable_import_proof_recording, offchain_indexing_api: self.enable_offchain_indexing_api, no_genesis: self.no_genesis, ..Default::default() diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 49b84e04dab45a2597ddacd61037a1467b4acae7..b6e6346e8017641dacd0f1bad4083db73be3fce8 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -24,9 +24,9 @@ sp-block-builder = { path = "../../primitives/block-builder", default-features = codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-keyring = { path = "../../primitives/keyring", optional = true } +sp-keyring = { path = "../../primitives/keyring", default-features = false } sp-offchain = { path = "../../primitives/offchain", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } @@ -62,8 +62,8 @@ sc-executor-common = { path = "../../client/executor/common" } sp-consensus = { path = "../../primitives/consensus/common" } substrate-test-runtime-client = { path = "client" } sp-tracing = { path = "../../primitives/tracing" } -serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false } -serde_json = { version = "1.0.113", default-features = false, features = ["alloc"] } +serde = {
features = ["alloc", "derive"], workspace = true } +serde_json = { features = ["alloc"], workspace = true } [build-dependencies] substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true } @@ -99,7 +99,7 @@ std = [ "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", - "sp-keyring", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs index 5ed9c8a645886f741611bc9fdc5d4eae01ae2704..9e972886b3771469284a7ee66e5f823623a75b07 100644 --- a/substrate/test-utils/runtime/src/genesismap.rs +++ b/substrate/test-utils/runtime/src/genesismap.rs @@ -124,7 +124,6 @@ impl GenesisStorageBuilder { .into_iter() .map(|x| (x.into(), 1)) .collect(), - epoch_config: Some(crate::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION), ..Default::default() }, substrate_test: substrate_test_pallet::GenesisConfig { diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 8bc6f72a82ec2d0690c24565e841931bada92f05..63e0aa6e13791cfa88c3c039b9ffcb76ab24e145 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -61,7 +61,7 @@ use sp_runtime::{ create_runtime_str, impl_opaque_keys, traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, NumberFor, Verify}, transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, - ApplyExtrinsicResult, Perbill, + ApplyExtrinsicResult, ExtrinsicInclusionMode, Perbill, }; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; @@ -480,9 +480,9 @@ impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> ExtrinsicInclusionMode { log::trace!(target: LOG_TARGET, "initialize_block: {header:#?}"); - Executive::initialize_block(header); + Executive::initialize_block(header) } } @@ -1291,7 +1291,7 @@ mod tests { let r = Vec::::decode(&mut &r[..]).unwrap(); let json = String::from_utf8(r.into()).expect("returned value is json. 
qed."); - let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c":[1,4],"allowed_slots":"PrimaryAndSecondaryVRFSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; assert_eq!(expected.to_string(), json); } diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index b52a897438b6854a066a95e51ba49bd0eddd7f84..33e56e8e55e7a7479fb1c783af818f3120e1b7f4 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" parking_lot = "0.12.1" -thiserror = "1.0" +thiserror = { workspace = true } sc-transaction-pool = { path = "../../../client/transaction-pool" } sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index 8c8345b06bd32e4efbe2f721ddebe2cc2f2f9c4e..5202e6e65154d62085a0656cc780cc39452e0ed9 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -81,6 +81,7 @@ pub struct ChainState { pub block_by_hash: HashMap, pub nonces: HashMap, pub invalid_hashes: HashSet, + pub priorities: HashMap, } /// Test Api for transaction pool. @@ -214,6 +215,22 @@ impl TestApi { self.chain.write().invalid_hashes.insert(Self::hash_and_length_inner(xts).0); } + /// Remove a transaction that was previously declared as invalid via `[Self::add_invalid]`. + /// + /// Next time transaction pool will try to validate this + /// extrinsic, api will succeed. + pub fn remove_invalid(&self, xts: &Extrinsic) { + self.chain.write().invalid_hashes.remove(&Self::hash_and_length_inner(xts).0); + } + + /// Set a transaction priority. + pub fn set_priority(&self, xts: &Extrinsic, priority: u64) { + self.chain + .write() + .priorities + .insert(Self::hash_and_length_inner(xts).0, priority); + } + /// Query validation requests received. 
pub fn validation_requests(&self) -> Vec<Extrinsic> { self.validation_requests.read().clone() @@ -300,8 +317,14 @@ impl ChainApi for TestApi { return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))) } - let mut validity = - ValidTransaction { priority: 1, requires, provides, longevity: 64, propagate: true }; + let priority = self.chain.read().priorities.get(&self.hash_and_length(&uxt).0).cloned(); + let mut validity = ValidTransaction { + priority: priority.unwrap_or(1), + requires, + provides, + longevity: 64, + propagate: true, + }; (self.valid_modifier.read())(&mut validity); diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 4f6cbc1d5d6a5aa05a0f20f151ad399ddcd854b8..a25ab3a8f5b159f600cdc088365ca8f970aa6a7f 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -21,7 +21,7 @@ chrono = "0.4" clap = { version = "4.5.1", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } comfy-table = { version = "7.1.0", default-features = false } -handlebars = "4.2.2" +handlebars = "5.1.0" Inflector = "0.11.4" itertools = "0.10.3" lazy_static = "1.4.0" @@ -29,9 +29,9 @@ linked-hash-map = "0.5.4" log = { workspace = true, default-features = true } rand = { version = "0.8.5", features = ["small_rng"] } rand_pcg = "0.3.1" -serde = "1.0.196" -serde_json = "1.0.113" -thiserror = "1.0.48" +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } thousands = "0.2.0" frame-benchmarking = { path = "../../../frame/benchmarking" } frame-support = { path = "../../../frame/support" } diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index dce49db15f7d4764aeba5f2bd0e0d1ede65cd15b..c6ba282401672335a6a72e698533a4af35670523 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::{writer, PalletCmd}; +use super::{writer, ListOutput, PalletCmd}; use codec::{Decode, Encode}; use frame_benchmarking::{ Analysis, BenchmarkBatch, BenchmarkBatchSplitResults, BenchmarkList, BenchmarkParameter, @@ -39,7 +39,13 @@ use sp_externalities::Extensions; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::traits::Hash; use sp_state_machine::StateMachine; -use std::{collections::HashMap, fmt::Debug, fs, str::FromStr, time}; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + fmt::Debug, + fs, + str::FromStr, + time, +}; /// Logging target const LOG_TARGET: &'static str = "frame::benchmark::pallet"; @@ -191,6 +197,7 @@ impl PalletCmd { let spec = config.chain_spec; let pallet = self.pallet.clone().unwrap_or_default(); let pallet = pallet.as_bytes(); + let extrinsic = self.extrinsic.clone().unwrap_or_default(); let extrinsic_split: Vec<&str> = extrinsic.split(',').collect(); let extrinsics: Vec<_> = extrinsic_split.iter().map(|x| x.trim().as_bytes()).collect(); @@ -291,16 +298,23 @@ impl PalletCmd { // Convert `Vec<u8>` to `String` for better readability.
let benchmarks_to_run: Vec<_> = benchmarks_to_run .into_iter() - .map(|b| { + .map(|(pallet, extrinsic, components, pov_modes)| { + let pallet_name = + String::from_utf8(pallet.clone()).expect("Encoded from String; qed"); + let extrinsic_name = + String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"); ( - b.0, - b.1, - b.2, - b.3.into_iter() + pallet, + extrinsic, + components, + pov_modes + .into_iter() .map(|(p, s)| { (String::from_utf8(p).unwrap(), String::from_utf8(s).unwrap()) }) .collect(), + pallet_name, + extrinsic_name, ) }) .collect(); @@ -309,9 +323,8 @@ impl PalletCmd { return Err("No benchmarks found which match your input.".into()) } - if self.list { - // List benchmarks instead of running them - list_benchmark(benchmarks_to_run); + if let Some(list_output) = self.list { + list_benchmark(benchmarks_to_run, list_output, self.no_csv_header); return Ok(()) } @@ -323,12 +336,12 @@ impl PalletCmd { let mut component_ranges = HashMap::<(Vec<u8>, Vec<u8>), Vec<ComponentRange>>::new(); let pov_modes = Self::parse_pov_modes(&benchmarks_to_run)?; - for (pallet, extrinsic, components, _) in benchmarks_to_run.clone() { + for (pallet, extrinsic, components, _, pallet_name, extrinsic_name) in + benchmarks_to_run.clone() + { log::info!( target: LOG_TARGET, - "Starting benchmark: {}::{}", - String::from_utf8(pallet.clone()).expect("Encoded from String; qed"), - String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"), + "Starting benchmark: {pallet_name}::{extrinsic_name}" ); let all_components = if components.is_empty() { vec![Default::default()] @@ -409,12 +422,7 @@ impl PalletCmd { ) .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))? .map_err(|e| { - format!( - "Benchmark {}::{} failed: {}", - String::from_utf8_lossy(&pallet), - String::from_utf8_lossy(&extrinsic), - e - ) + format!("Benchmark {pallet_name}::{extrinsic_name} failed: {e}",) })?; } // Do one loop of DB tracking. @@ -488,11 +496,7 @@ impl PalletCmd { log::info!( target: LOG_TARGET, - "Running benchmark: {}.{}({} args) {}/{} {}/{}", - String::from_utf8(pallet.clone()) - .expect("Encoded from String; qed"), - String::from_utf8(extrinsic.clone()) - .expect("Encoded from String; qed"), + "Running benchmark: {pallet_name}::{extrinsic_name}({} args) {}/{} {}/{}", components.len(), s + 1, // s starts at 0. all_components.len(), @@ -701,12 +705,14 @@ impl PalletCmd { Vec<u8>, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, + String, + String, )>, ) -> Result<PovModesMap> { use std::collections::hash_map::Entry; let mut parsed = PovModesMap::new(); - for (pallet, call, _components, pov_modes) in benchmarks { + for (pallet, call, _components, pov_modes, _, _) in benchmarks { for (pallet_storage, mode) in pov_modes { let mode = PovEstimationMode::from_str(&mode)?; let splits = pallet_storage.split("::").collect::<Vec<_>>(); @@ -755,19 +761,45 @@ impl CliConfiguration for PalletCmd { /// List the benchmarks available in the runtime, in a CSV friendly format. fn list_benchmark( - mut benchmarks_to_run: Vec<( + benchmarks_to_run: Vec<( Vec<u8>, Vec<u8>, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, + String, + String, )>, + list_output: ListOutput, + no_csv_header: bool, ) { - // Sort and de-dub by pallet and function name.
- benchmarks_to_run.sort_by(|(pa, sa, _, _), (pb, sb, _, _)| (pa, sa).cmp(&(pb, sb))); - benchmarks_to_run.dedup_by(|(pa, sa, _, _), (pb, sb, _, _)| (pa, sa) == (pb, sb)); + let mut benchmarks = BTreeMap::new(); - println!("pallet, benchmark"); - for (pallet, extrinsic, _, _) in benchmarks_to_run { - println!("{}, {}", String::from_utf8_lossy(&pallet), String::from_utf8_lossy(&extrinsic)); + // Sort and de-dup by pallet and function name. + benchmarks_to_run.iter().for_each(|(_, _, _, _, pallet_name, extrinsic_name)| { + benchmarks + .entry(pallet_name) + .or_insert_with(BTreeSet::new) + .insert(extrinsic_name); + }); + + match list_output { + ListOutput::All => { + if !no_csv_header { + println!("pallet,extrinsic"); + } + for (pallet, extrinsics) in benchmarks { + for extrinsic in extrinsics { + println!("{pallet},{extrinsic}"); + } + } + }, + ListOutput::Pallets => { + if !no_csv_header { + println!("pallet"); + }; + for pallet in benchmarks.keys() { + println!("{pallet}"); + } + }, } } diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs index c69ce1765fc9dc490f1e0f40966ca07948b6b523..6dc56c0724eaffde1d70d25ef1161b29e01df0c4 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -19,6 +19,7 @@ mod command; mod writer; use crate::shared::HostInfoParams; +use clap::ValueEnum; use sc_cli::{ WasmExecutionMethod, WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD, @@ -31,17 +32,32 @@ fn parse_pallet_name(pallet: &str) -> std::result::Result<String, String> { Ok(pallet.replace("-", "_")) } +/// List options for available benchmarks. +#[derive(Debug, Clone, Copy, ValueEnum)] +pub enum ListOutput { + /// List all available pallets and extrinsics. + All, + /// List all available pallets only. + Pallets, +} + /// Benchmark the extrinsic weight of FRAME Pallets. #[derive(Debug, clap::Parser)] pub struct PalletCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input"])] + #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub pallet: Option<String>, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[arg(short, long, required_unless_present_any = ["list", "json_input"])] + #[arg(short, long, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub extrinsic: Option<String>, + /// Run benchmarks for all pallets and extrinsics. + /// + /// This is equivalent to running `--pallet * --extrinsic *`. + #[arg(long)] + pub all: bool, + /// Select how many samples we should take across the variable components. #[arg(short, long, default_value_t = 50)] pub steps: u32, @@ -158,11 +174,15 @@ pub struct PalletCmd { #[arg(long = "db-cache", value_name = "MiB", default_value_t = 1024)] pub database_cache_size: u32, - /// List the benchmarks that match your query rather than running them. + /// List and print available benchmarks in a csv-friendly format. /// - /// When nothing is provided, we list all benchmarks.
- #[arg(long)] - pub list: bool, + /// NOTE: `num_args` and `require_equals` are required to allow `--list` + #[arg(long, value_enum, ignore_case = true, num_args = 0..=1, require_equals = true, default_missing_value("All"))] + pub list: Option<ListOutput>, + + /// Don't include csv header when listing benchmarks. + #[arg(long, requires("list"))] + pub no_csv_header: bool, /// If enabled, the storage info is not displayed in the output next to the analysis. /// diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs index 9493a693bbed321785bf0d23326d0f68b3efd534..bd4b65d8a2e378d543b54bebd8db4a1c3d378f98 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -153,8 +153,8 @@ fn map_results( continue } - let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); - let instance_string = String::from_utf8(batch.instance.clone()).unwrap(); + let pallet_name = String::from_utf8(batch.pallet.clone()).unwrap(); + let instance_name = String::from_utf8(batch.instance.clone()).unwrap(); let benchmark_data = get_benchmark_data( batch, storage_info, @@ -166,7 +166,7 @@ fn map_results( worst_case_map_values, additional_trie_layers, ); - let pallet_benchmarks = all_benchmarks.entry((pallet_string, instance_string)).or_default(); + let pallet_benchmarks = all_benchmarks.entry((pallet_name, instance_name)).or_default(); pallet_benchmarks.push(benchmark_data); } Ok(all_benchmarks) @@ -571,19 +571,22 @@ pub(crate) fn process_storage_results( let mut prefix_result = result.clone(); let key_info = storage_info_map.get(&prefix); + let pallet_name = match key_info { + Some(k) => String::from_utf8(k.pallet_name.clone()).expect("encoded from string"), + None => "".to_string(), + }; + let storage_name = match key_info { + Some(k) => String::from_utf8(k.storage_name.clone()).expect("encoded from string"), + None => "".to_string(), + }; let max_size = key_info.and_then(|k| k.max_size); let override_pov_mode = match key_info { - Some(StorageInfo { pallet_name, storage_name, .. }) => { - let pallet_name = - String::from_utf8(pallet_name.clone()).expect("encoded from string"); - let storage_name = - String::from_utf8(storage_name.clone()).expect("encoded from string"); - + Some(_) => { // Is there an override for the storage key? - pov_modes.get(&(pallet_name.clone(), storage_name)).or( + pov_modes.get(&(pallet_name.clone(), storage_name.clone())).or( // .. or for the storage prefix? - pov_modes.get(&(pallet_name, "ALL".to_string())).or( + pov_modes.get(&(pallet_name.clone(), "ALL".to_string())).or( // .. or for the benchmark? pov_modes.get(&("ALL".to_string(), "ALL".to_string())), ), @@ -662,15 +665,10 @@ pub(crate) fn process_storage_results( // writes.
if !is_prefix_identified { match key_info { - Some(key_info) => { + Some(_) => { let comment = format!( "Storage: `{}::{}` (r:{} w:{})", - String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"), - String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"), - reads, - writes, + pallet_name, storage_name, reads, writes, ); comments.push(comment) }, @@ -698,11 +696,7 @@ pub(crate) fn process_storage_results( ) { Some(new_pov) => { let comment = format!( - "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)", - String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"), - String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"), + "Proof: `{pallet_name}::{storage_name}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)", key_info.max_values, key_info.max_size, new_pov, @@ -711,13 +705,9 @@ pub(crate) fn process_storage_results( comments.push(comment) }, None => { - let pallet = String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"); - let item = String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"); let comment = format!( "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, mode: `{:?}`)", - pallet, item, key_info.max_values, key_info.max_size, + pallet_name, storage_name, key_info.max_values, key_info.max_size, used_pov_mode, ); comments.push(comment); diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index c96d37d4ae63d1b27be2cdd42d1761154c1e4ec7..61e0a861ee0ebd847a54c46280d5c3c10fce0ad2 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.22", features = ["http-client"] } codec = { package = "parity-scale-codec", version = "3.6.1" } log = { workspace = true, default-features = true } -serde = "1.0.196" +serde = { workspace = true, default-features = true } sp-core = { path = "../../../primitives/core" } sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } sp-state-machine = { path = "../../../primitives/state-machine" } diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index 0057bf6eb50b9b0279685659828798153c5bbcf8..b51e3f44f4e6457ee2f9e12dedace27728b96ac4 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.22", features = ["ws-client"] } sc-rpc-api = { path = "../../../../client/rpc-api" } async-trait = "0.1.74" -serde = "1" +serde = { workspace = true, default-features = true } sp-runtime = { path = "../../../../primitives/runtime" } log = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 58c2f3d99acc9a1b70144ac94994882041768b8a..f9a45e21ce13d3690c1efe366e1d0f480f0f9b8f 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -serde = { version = "1", features = ["derive"] } +serde = { 
features = ["derive"], workspace = true, default-features = true } sp-core = { path = "../../../../primitives/core" } sp-state-machine = { path = "../../../../primitives/state-machine" } @@ -32,4 +32,4 @@ sc-rpc-api = { path = "../../../../client/rpc-api" } sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index a4b0a7777fbf880c70bd4c6021b0b275d7fe794f..2e4bb6a105784fafecd1344a909e69dbcbfa0e3f 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.22", features = ["jsonrpsee-types"] } -serde = "1" +serde = { workspace = true, default-features = true } frame-support = { path = "../../../../frame/support" } sc-rpc-api = { path = "../../../../client/rpc-api" } sp-storage = { path = "../../../../primitives/storage" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 63165f07e5c0f48917659fc4f716c7eb4afe7d34..d123fc5f62dc5848dc560f1eca37523435ebb331 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -42,8 +42,8 @@ clap = { version = "4.5.1", features = ["derive"] } hex = { version = "0.4.3", default-features = false } log = { workspace = true, default-features = true } parity-scale-codec = "3.6.1" -serde = "1.0.196" -serde_json = "1.0.113" +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } zstd = { version = "0.12.4", default-features = false } [dev-dependencies] diff --git a/substrate/utils/prometheus/Cargo.toml b/substrate/utils/prometheus/Cargo.toml index 2a09cb2bb5105f9a906f61b65dd3fc32cd8d784e..36527ac6183bb921266023320345afb9f6a89246 100644 --- a/substrate/utils/prometheus/Cargo.toml +++ b/substrate/utils/prometheus/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] hyper = { version = "0.14.16", default-features = false, features = ["http1", "server", "tcp"] } log = { workspace = true, default-features = true } prometheus = { version = "0.13.0", default-features = false } -thiserror = "1.0" +thiserror = { workspace = true } tokio = { version = "1.22.0", features = ["parking_lot"] } [dev-dependencies] diff --git a/substrate/utils/substrate-bip39/Cargo.toml b/substrate/utils/substrate-bip39/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a46f81ee24d96d666495ee5cea5ca58b415a9cb0 --- /dev/null +++ b/substrate/utils/substrate-bip39/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "substrate-bip39" +version = "0.4.7" +license = "Apache-2.0" +description = "Converting BIP39 entropy to valid Substrate (sr25519) SecretKeys" +documentation = "https://docs.rs/substrate-bip39" +authors.workspace = true +edition.workspace = true +repository.workspace = true + +[dependencies] +hmac = "0.12.1" +pbkdf2 = { version = "0.12.2", default-features = false } +schnorrkel = { version = "0.11.4", default-features = false } +sha2 = { version = "0.10.7", default-features = false } +zeroize = { version = "1.4.3", default-features = false } + +[dev-dependencies] +bip39 = "2.0.0" +rustc-hex = "2.1.0" + +[features] +default = ["std"] +std = [ + 
"hmac/std", + "pbkdf2/std", + "schnorrkel/std", + "sha2/std", + "zeroize/alloc", + "zeroize/std", +] diff --git a/substrate/utils/substrate-bip39/README.md b/substrate/utils/substrate-bip39/README.md new file mode 100644 index 0000000000000000000000000000000000000000..368d18f406bb6cf36abad1daa4020882c1a2fe73 --- /dev/null +++ b/substrate/utils/substrate-bip39/README.md @@ -0,0 +1,55 @@ +# Substrate BIP39 + +This is a crate for deriving secret keys for Ristretto compressed Ed25519 (should be compatible with Ed25519 at this +time) from BIP39 phrases. + +## Why? + +The natural approach here would be to use the 64-byte seed generated from the BIP39 phrase, and use that to construct +the key. This approach, while reasonable and fairly straight forward to implement, also means we would have to inherit +all the characteristics of seed generation. Since we are breaking compatibility with both BIP32 and BIP44 anyway (which +we are free to do as we are no longer using the Secp256k1 curve), there is also no reason why we should adhere to BIP39 +seed generation from the mnemonic. + +BIP39 seed generation was designed to be compatible with user supplied brain wallet phrases as well as being extensible +to wallets providing their own dictionaries and checksum mechanism. Issues with those two points: + +1. Brain wallets are a horrible idea, simply because humans are bad entropy generators. It's next to impossible to + educate users on how to use that feature in a secure manner. The 2048 rounds of PBKDF2 is a mere inconvenience that + offers no real protection against dictionary attacks for anyone equipped with modern consumer hardware. Brain wallets + have given users false sense of security. _People have lost money_ this way and wallet providers today tend to stick + to CSPRNG supplied dictionary phrases. + +2. Providing own dictionaries felt into the _you ain't gonna need it_ anti-pattern category on day 1. Wallet providers + (be it hardware or software) typically want their products to be compatibile with other wallets so that users can + migrate to their product without having to migrate all their assets. + +To achieve the above phrases have to be precisely encoded in _The One True Canonical Encoding_, for which UTF-8 NFKD was +chosen. This is largely irrelevant (and even ignored) for English phrases, as they encode to basically just ASCII in +virtualy every character encoding known to mankind, but immedietly becomes a problem for dictionaries that do use +non-ASCII characters. Even if the right encoding is used and implemented correctly, there are still [other caveats +present for some non-english dictionaries](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md), +such as normalizing spaces to a canonical form, or making some latin based characters equivalent to their base in +dictionary lookups (eg. Spanish `ñ` and `n` are meant to be interchangeable). Thinking about all of this gives me a +headache, and opens doors for disagreements between buggy implementations, breaking compatibility. + +BIP39 does already provide a form of the mnemonic that is free from all of these issues: the entropy byte array. Since +veryfing the checksum requires that we recover the entropy from which the phrase was generated, no extra work is +actually needed here. Wallet implementators can encode the dictionaries in whatever encoding they find convenient (as +long as they are the standard BIP39 dictionaries), no harm in using UTF-16 string primitives that Java and JavaScript +provide. 
Since the dictionary is fixed and known, and the checksum is done on the entropy itself, the exact character +encoding used becomes irrelevant, as are the precise codepoints and amount of whitespace around the words. It is thus +much harder to create a buggy implementation. + +PBKDF2 was kept in place, along with the password. Using 24 words (with their 256 bits of entropy) makes the extra hashing +redundant (if you could brute force 256 bit entropy, you can also just brute force secret keys), however some users +might still be using 12 word phrases from other applications. There is no good reason that I can see to prohibit users from recovering +their old wallets using 12 words, in which case the extra hashing does provide _some_ protection. +Passwords are also a feature that some power users find useful - particularly for creating a decoy address with a small +balance and an empty password, while the funds proper are stored on an address that requires a password to be entered. + +## Why not ditch BIP39 altogether? + +Because there are hardware wallets that use a single phrase for the entire device, and operate multiple accounts on +multiple networks using it. A completely different wordlist would make their lives much harder when it comes to +providing future Substrate support. diff --git a/substrate/utils/substrate-bip39/src/lib.rs b/substrate/utils/substrate-bip39/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..3673d20faed68e8897b97040c66d726a8877fd7a --- /dev/null +++ b/substrate/utils/substrate-bip39/src/lib.rs @@ -0,0 +1,232 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(not(feature = "std"))] +use alloc::string::String; + +use hmac::Hmac; +use pbkdf2::pbkdf2; +use schnorrkel::keys::MiniSecretKey; +use sha2::Sha512; +use zeroize::Zeroize; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Error { + InvalidEntropy, +} + +/// `entropy` should be a byte array from a correctly recovered and checksummed BIP39 phrase. +/// +/// This function accepts slices of different lengths for different word lengths: +/// +/// + 16 bytes for 12 words. +/// + 20 bytes for 15 words. +/// + 24 bytes for 18 words. +/// + 28 bytes for 21 words. +/// + 32 bytes for 24 words. +/// +/// Any other length will return an error. +/// +/// `password` is analogous to the password in BIP39 seed generation itself, with an empty string being the default. +pub fn mini_secret_from_entropy(entropy: &[u8], password: &str) -> Result<MiniSecretKey, Error> { + let seed = seed_from_entropy(entropy, password)?; + Ok(MiniSecretKey::from_bytes(&seed[..32]).expect("Length is always correct; qed")) +} + +/// Similar to `mini_secret_from_entropy`, except that it provides the 64-byte seed directly.
+pub fn seed_from_entropy(entropy: &[u8], password: &str) -> Result<[u8; 64], Error> {
+	if entropy.len() < 16 || entropy.len() > 32 || entropy.len() % 4 != 0 {
+		return Err(Error::InvalidEntropy);
+	}
+
+	let mut salt = String::with_capacity(8 + password.len());
+	salt.push_str("mnemonic");
+	salt.push_str(password);
+
+	let mut seed = [0u8; 64];
+
+	pbkdf2::<Hmac<Sha512>>(entropy, salt.as_bytes(), 2048, &mut seed)
+		.map_err(|_| Error::InvalidEntropy)?;
+
+	salt.zeroize();
+
+	Ok(seed)
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[cfg(not(feature = "std"))]
+	use alloc::vec::Vec;
+
+	use bip39::{Language, Mnemonic};
+	use rustc_hex::FromHex;
+
+	// Each entry is: phrase, entropy, seed.
+	//
+	// ALL SEEDS GENERATED USING "Substrate" PASSWORD!
+	static VECTORS: &[[&str; 3]] = &[
+		[
+			"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
+			"00000000000000000000000000000000",
+			"44e9d125f037ac1d51f0a7d3649689d422c2af8b1ec8e00d71db4d7bf6d127e33f50c3d5c84fa3e5399c72d6cbbbbc4a49bf76f76d952f479d74655a2ef2d453",
+		],
+		[
+			"legal winner thank year wave sausage worth useful legal winner thank yellow",
+			"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
+			"4313249608fe8ac10fd5886c92c4579007272cb77c21551ee5b8d60b780416850f1e26c1f4b8d88ece681cb058ab66d6182bc2ce5a03181f7b74c27576b5c8bf",
+		],
+		[
+			"letter advice cage absurd amount doctor acoustic avoid letter advice cage above",
+			"80808080808080808080808080808080",
+			"27f3eb595928c60d5bc91a4d747da40ed236328183046892ed6cd5aa9ae38122acd1183adf09a89839acb1e6eaa7fb563cc958a3f9161248d5a036e0d0af533d",
+		],
+		[
+			"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong",
+			"ffffffffffffffffffffffffffffffff",
+			"227d6256fd4f9ccaf06c45eaa4b2345969640462bbb00c5f51f43cb43418c7a753265f9b1e0c0822c155a9cabc769413ecc14553e135fe140fc50b6722c6b9df",
+		],
+		[
+			"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent",
+			"000000000000000000000000000000000000000000000000",
+			"44e9d125f037ac1d51f0a7d3649689d422c2af8b1ec8e00d71db4d7bf6d127e33f50c3d5c84fa3e5399c72d6cbbbbc4a49bf76f76d952f479d74655a2ef2d453",
+		],
+		[
+			"legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will",
+			"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
+			"cb1d50e14101024a88905a098feb1553d4306d072d7460e167a60ccb3439a6817a0afc59060f45d999ddebc05308714733c9e1e84f30feccddd4ad6f95c8a445",
+		],
+		[
+			"letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always",
+			"808080808080808080808080808080808080808080808080",
+			"9ddecf32ce6bee77f867f3c4bb842d1f0151826a145cb4489598fe71ac29e3551b724f01052d1bc3f6d9514d6df6aa6d0291cfdf997a5afdb7b6a614c88ab36a",
+		],
+		[
+			"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo when",
+			"ffffffffffffffffffffffffffffffffffffffffffffffff",
+			"8971cb290e7117c64b63379c97ed3b5c6da488841bd9f95cdc2a5651ac89571e2c64d391d46e2475e8b043911885457cd23e99a28b5a18535fe53294dc8e1693",
+		],
+		[
+			"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art",
+			"0000000000000000000000000000000000000000000000000000000000000000",
+			"44e9d125f037ac1d51f0a7d3649689d422c2af8b1ec8e00d71db4d7bf6d127e33f50c3d5c84fa3e5399c72d6cbbbbc4a49bf76f76d952f479d74655a2ef2d453",
+		],
+		[
+			"legal winner thank year wave sausage worth useful 
legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title", + "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f", + "3037276a5d05fcd7edf51869eb841bdde27c574dae01ac8cfb1ea476f6bea6ef57ab9afe14aea1df8a48f97ae25b37d7c8326e49289efb25af92ba5a25d09ed3", + ], + [ + "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless", + "8080808080808080808080808080808080808080808080808080808080808080", + "2c9c6144a06ae5a855453d98c3dea470e2a8ffb78179c2e9eb15208ccca7d831c97ddafe844ab933131e6eb895f675ede2f4e39837bb5769d4e2bc11df58ac42", + ], + [ + "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "047e89ef7739cbfe30da0ad32eb1720d8f62441dd4f139b981b8e2d0bd412ed4eb14b89b5098c49db2301d4e7df4e89c21e53f345138e56a5e7d63fae21c5939", + ], + [ + "ozone drill grab fiber curtain grace pudding thank cruise elder eight picnic", + "9e885d952ad362caeb4efe34a8e91bd2", + "f4956be6960bc145cdab782e649a5056598fd07cd3f32ceb73421c3da27833241324dc2c8b0a4d847eee457e6d4c5429f5e625ece22abaa6a976e82f1ec5531d", + ], + [ + "gravity machine north sort system female filter attitude volume fold club stay feature office ecology stable narrow fog", + "6610b25967cdcca9d59875f5cb50b0ea75433311869e930b", + "fbcc5229ade0c0ff018cb7a329c5459f91876e4dde2a97ddf03c832eab7f26124366a543f1485479c31a9db0d421bda82d7e1fe562e57f3533cb1733b001d84d", + ], + [ + "hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length", + "68a79eaca2324873eacc50cb9c6eca8cc68ea5d936f98787c60c7ebc74e6ce7c", + "7c60c555126c297deddddd59f8cdcdc9e3608944455824dd604897984b5cc369cad749803bb36eb8b786b570c9cdc8db275dbe841486676a6adf389f3be3f076", + ], + [ + "scheme spot photo card baby mountain device kick cradle pact join borrow", + "c0ba5a8e914111210f2bd131f3d5e08d", + "c12157bf2506526c4bd1b79a056453b071361538e9e2c19c28ba2cfa39b5f23034b974e0164a1e8acd30f5b4c4de7d424fdb52c0116bfc6a965ba8205e6cc121", + ], + [ + "horn tenant knee talent sponsor spell gate clip pulse soap slush warm silver nephew swap uncle crack brave", + "6d9be1ee6ebd27a258115aad99b7317b9c8d28b6d76431c3", + "23766723e970e6b79dec4d5e4fdd627fd27d1ee026eb898feb9f653af01ad22080c6f306d1061656d01c4fe9a14c05f991d2c7d8af8730780de4f94cd99bd819", + ], + [ + "panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside", + "9f6a2878b2520799a44ef18bc7df394e7061a224d2c33cd015b157d746869863", + "f4c83c86617cb014d35cd87d38b5ef1c5d5c3d58a73ab779114438a7b358f457e0462c92bddab5a406fe0e6b97c71905cf19f925f356bc673ceb0e49792f4340", + ], + [ + "cat swing flag economy stadium alone churn speed unique patch report train", + "23db8160a31d3e0dca3688ed941adbf3", + "719d4d4de0638a1705bf5237262458983da76933e718b2d64eb592c470f3c5d222e345cc795337bb3da393b94375ff4a56cfcd68d5ea25b577ee9384d35f4246", + ], + [ + "light rule cinnamon wrap drastic word pride squirrel upgrade then income fatal apart sustain crack supply proud access", + "8197a4a47f0425faeaa69deebc05ca29c0a5b5cc76ceacc0", + "7ae1291db32d16457c248567f2b101e62c5549d2a64cd2b7605d503ec876d58707a8d663641e99663bc4f6cc9746f4852e75e7e54de5bc1bd3c299c9a113409e", + ], + [ + "all hour make first leader extend hole alien behind 
guard gospel lava path output census museum junior mass reopen famous sing advance salt reform",
+			"066dca1a2bb7e8a1db2832148ce9933eea0f3ac9548d793112d9a95c9407efad",
+			"a911a5f4db0940b17ecb79c4dcf9392bf47dd18acaebdd4ef48799909ebb49672947cc15f4ef7e8ef47103a1a91a6732b821bda2c667e5b1d491c54788c69391",
+		],
+		[
+			"vessel ladder alter error federal sibling chat ability sun glass valve picture",
+			"f30f8c1da665478f49b001d94c5fc452",
+			"4e2314ca7d9eebac6fe5a05a5a8d3546bc891785414d82207ac987926380411e559c885190d641ff7e686ace8c57db6f6e4333c1081e3d88d7141a74cf339c8f",
+		],
+		[
+			"scissors invite lock maple supreme raw rapid void congress muscle digital elegant little brisk hair mango congress clump",
+			"c10ec20dc3cd9f652c7fac2f1230f7a3c828389a14392f05",
+			"7a83851102849edc5d2a3ca9d8044d0d4f00e5c4a292753ed3952e40808593251b0af1dd3c9ed9932d46e8608eb0b928216a6160bd4fc775a6e6fbd493d7c6b2",
+		],
+		[
+			"void come effort suffer camp survey warrior heavy shoot primary clutch crush open amazing screen patrol group space point ten exist slush involve unfold",
+			"f585c11aec520db57dd353c69554b21a89b20fb0650966fa0a9d6f74fd989d8f",
+			"938ba18c3f521f19bd4a399c8425b02c716844325b1a65106b9d1593fbafe5e0b85448f523f91c48e331995ff24ae406757cff47d11f240847352b348ff436ed",
+		]
+	];
+
+	#[test]
+	fn vectors_are_correct() {
+		for vector in VECTORS {
+			let phrase = vector[0];
+
+			let expected_entropy: Vec<u8> = vector[1].from_hex().unwrap();
+			let expected_seed: Vec<u8> = vector[2].from_hex().unwrap();
+
+			let mnemonic = Mnemonic::parse_in(Language::English, phrase).unwrap();
+			let seed = seed_from_entropy(&mnemonic.to_entropy(), "Substrate").unwrap();
+			let secret = mini_secret_from_entropy(&mnemonic.to_entropy(), "Substrate")
+				.unwrap()
+				.to_bytes();
+
+			assert_eq!(
+				mnemonic.to_entropy(),
+				&expected_entropy[..],
+				"Entropy is incorrect for {}",
+				phrase
+			);
+			assert_eq!(&seed[..], &expected_seed[..], "Seed is incorrect for {}", phrase);
+			assert_eq!(&secret[..], &expected_seed[..32], "Secret is incorrect for {}", phrase);
+		}
+	}
+}
diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs
index a601e3210dd0c2b89beffa1d5d6b788cb35fd6a3..22caf8950637960310c62eb98c5358de55c1662d 100644
--- a/substrate/utils/wasm-builder/src/prerequisites.rs
+++ b/substrate/utils/wasm-builder/src/prerequisites.rs
@@ -149,6 +149,14 @@ impl<'a> DummyCrate<'a> {
 		sysroot_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok())
 	}
 
+	fn get_toolchain(&self) -> Option<String> {
+		let sysroot = self.get_sysroot()?;
+		Path::new(sysroot.trim())
+			.file_name()
+			.and_then(|s| s.to_str())
+			.map(|s| s.to_string())
+	}
+
 	fn try_build(&self) -> Result<(), Option<String>> {
 		let Ok(result) = self.prepare_command("build").output() else { return Err(None) };
 		if !result.status.success() {
@@ -164,14 +172,15 @@ fn check_wasm_toolchain_installed(
 	let dummy_crate = DummyCrate::new(&cargo_command, RuntimeTarget::Wasm);
 
 	if let Err(error) = dummy_crate.try_build() {
+		let toolchain = dummy_crate.get_toolchain().unwrap_or("<unknown>".to_string());
 		let basic_error_message = colorize_error_message(
-			"Rust WASM toolchain is not properly installed; please install it!",
+			&format!("Rust WASM target for toolchain {toolchain} is not properly installed; please install it!")
 		);
 		return match error {
 			None => Err(basic_error_message),
 			Some(error) if error.contains("the `wasm32-unknown-unknown` target may not be installed") =>
-				Err(colorize_error_message("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not 
installed!\n\
-				You can install it with `rustup target add wasm32-unknown-unknown` if you're using `rustup`."))
+				Err(colorize_error_message(&format!("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\
+				You can install it with `rustup target add wasm32-unknown-unknown --toolchain {toolchain}` if you're using `rustup`.")))
 			},
 			// Apparently this can happen when we're running on a non-Tier 1 platform.
 			Some(ref error) if error.contains("linker `rust-lld` not found") =>
@@ -193,9 +202,10 @@ fn check_wasm_toolchain_installed(
 	let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust");
 	if !src_path.exists() {
+		let toolchain = dummy_crate.get_toolchain().unwrap_or("<unknown>".to_string());
 		return Err(colorize_error_message(
-			"Cannot compile the WASM runtime: no standard library sources found!\n\
-			You can install them with `rustup component add rust-src` if you're using `rustup`.",
+			&format!("Cannot compile the WASM runtime: no standard library sources found at {}!\n\
+			You can install them with `rustup component add rust-src --toolchain {toolchain}` if you're using `rustup`.", src_path.display()),
		))
	}
}
diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs
index bc8c9b3b4b3999da9be5e1fb18a8c73444c1867b..13db9fce43103dabf94346b2a75b64cfeaca22e5 100644
--- a/substrate/utils/wasm-builder/src/wasm_project.rs
+++ b/substrate/utils/wasm-builder/src/wasm_project.rs
@@ -855,7 +855,7 @@ fn build_bloaty_blob(
 	println!("{} {}", colorize_info_message("Using rustc version:"), cargo_cmd.rustc_version());
 
 	// Use `process::exit(1)` to have a clean error output.
-	if build_cmd.status().map(|s| s.success()).is_err() {
+	if !build_cmd.status().map_or(false, |s| s.success()) {
 		process::exit(1);
 	}
 
@@ -877,7 +877,7 @@ fn build_bloaty_blob(
 	if polkavm_path
 		.metadata()
 		.map(|polkavm_metadata| {
-			polkavm_metadata.modified().unwrap() >= elf_metadata.modified().unwrap()
+			polkavm_metadata.modified().unwrap() < elf_metadata.modified().unwrap()
		})
		.unwrap_or(true)
	{
diff --git a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl
index ea0f15e5d8ace1315e231c6db2e01399e0ce44f3..b68bce508c008396d699e67e08b98cdf74ba5138 100644
--- a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl
+++ b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl
@@ -34,8 +34,8 @@ bob: reports block height is at least {{DB_BLOCK_HEIGHT}} within 10 seconds
 alice: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds
 bob: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds
 
-alice: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 60 seconds
-bob: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 60 seconds
+alice: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds
+bob: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds
 
 alice: count of log lines containing "error" is 0 within 10 seconds
 bob: count of log lines containing "verification failed" is 0 within 10 seconds
diff --git a/templates/minimal/README.md b/templates/minimal/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..410cfc9f6c34844a2c6edb5390c6e16c50cc4406
--- /dev/null
+++ b/templates/minimal/node/Cargo.toml
@@ -0,0 +1,62 @@
+[package]
+name = "minimal-template-node"
+description = "A minimal Substrate-based node, ready for hacking."
+version = "0.0.0"
+license = "MIT-0"
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+edition.workspace = true
+publish = false
+build = "build.rs"
+
+[lints]
+workspace = true
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+clap = { version = "4.5.1", features = ["derive"] }
+futures = { version = "0.3.21", features = ["thread-pool"] }
+futures-timer = "3.0.1"
+jsonrpsee = { version = "0.22", features = ["server"] }
+serde_json = { workspace = true, default-features = true }
+
+sc-cli = { path = "../../../substrate/client/cli" }
+sc-executor = { path = "../../../substrate/client/executor" }
+sc-network = { path = "../../../substrate/client/network" }
+sc-service = { path = "../../../substrate/client/service" }
+sc-telemetry = { path = "../../../substrate/client/telemetry" }
+sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" }
+sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" }
+sc-consensus = { path = "../../../substrate/client/consensus/common" }
+sc-consensus-manual-seal = { path = "../../../substrate/client/consensus/manual-seal" }
+sc-rpc-api = { path = "../../../substrate/client/rpc-api" }
+sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" }
+sc-offchain = { path = "../../../substrate/client/offchain" }
+sc-client-api = { path = "../../../substrate/client/api" }
+
+sp-timestamp = { path = "../../../substrate/primitives/timestamp" }
+sp-keyring = { path = "../../../substrate/primitives/keyring" }
+sp-api = { path = "../../../substrate/primitives/api" }
+sp-blockchain = { path = "../../../substrate/primitives/blockchain" }
+sp-block-builder = { path = "../../../substrate/primitives/block-builder" }
+sp-io = { path = "../../../substrate/primitives/io" }
+sp-runtime = { path = "../../../substrate/primitives/runtime" }
+
+substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" }
+
+# Once the native runtime is gone, there should be little to no dependency on FRAME here, and
+# certainly no dependency on the runtime.
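+#
+# A note on the two `frame` features enabled below (our reading of the feature names, not a
+# normative definition): `runtime` pulls in the pieces needed to compose and run a runtime,
+# while `experimental` opts into APIs that are not yet stabilised.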
+frame = { path = "../../../substrate/frame", features = [ + "experimental", + "runtime", +] } +runtime = { package = "minimal-template-runtime", path = "../runtime" } + +[build-dependencies] +substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } + +[features] +default = [] diff --git a/substrate/bin/minimal/node/build.rs b/templates/minimal/node/build.rs similarity index 100% rename from substrate/bin/minimal/node/build.rs rename to templates/minimal/node/build.rs diff --git a/substrate/bin/minimal/node/src/chain_spec.rs b/templates/minimal/node/src/chain_spec.rs similarity index 100% rename from substrate/bin/minimal/node/src/chain_spec.rs rename to templates/minimal/node/src/chain_spec.rs diff --git a/substrate/bin/minimal/node/src/cli.rs b/templates/minimal/node/src/cli.rs similarity index 100% rename from substrate/bin/minimal/node/src/cli.rs rename to templates/minimal/node/src/cli.rs diff --git a/substrate/bin/minimal/node/src/command.rs b/templates/minimal/node/src/command.rs similarity index 100% rename from substrate/bin/minimal/node/src/command.rs rename to templates/minimal/node/src/command.rs diff --git a/substrate/bin/minimal/node/src/lib.rs b/templates/minimal/node/src/lib.rs similarity index 94% rename from substrate/bin/minimal/node/src/lib.rs rename to templates/minimal/node/src/lib.rs index c2065def736aeeb3b1102bade04063532128f3cd..cb8ed3bd209dd24d1bfa9d53b904d411acbea02e 100644 --- a/substrate/bin/minimal/node/src/lib.rs +++ b/templates/minimal/node/src/lib.rs @@ -1,4 +1,4 @@ -// This file is part of Substrate. +// This file is part of Polkadot Sdk. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 diff --git a/substrate/bin/minimal/node/src/main.rs b/templates/minimal/node/src/main.rs similarity index 100% rename from substrate/bin/minimal/node/src/main.rs rename to templates/minimal/node/src/main.rs diff --git a/substrate/bin/minimal/node/src/rpc.rs b/templates/minimal/node/src/rpc.rs similarity index 100% rename from substrate/bin/minimal/node/src/rpc.rs rename to templates/minimal/node/src/rpc.rs diff --git a/substrate/bin/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs similarity index 100% rename from substrate/bin/minimal/node/src/service.rs rename to templates/minimal/node/src/service.rs diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9982e5ea53bc3eff28fb0331cfb14ad41a15eec7 --- /dev/null +++ b/templates/minimal/pallets/template/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "pallet-minimal-template" +description = "A minimal pallet built with FRAME, part of Polkadot Sdk." 
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", features = [ + "derive", +], default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } +frame = { path = "../../../../substrate/frame", default-features = false, features = [ + "experimental", + "runtime", +] } + + +[features] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/templates/minimal/pallets/template/src/lib.rs b/templates/minimal/pallets/template/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..713f014bbe61fc9fa7df5019c00afddebf02bbc6 --- /dev/null +++ b/templates/minimal/pallets/template/src/lib.rs @@ -0,0 +1,19 @@ +//! A shell pallet built with [`frame`]. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame::prelude::*; + +// Re-export all pallet parts, this is needed to properly import the pallet into the runtime. +pub use pallet::*; + +#[frame::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..20ffb706eb4992b71a7b34d638b7c4cdc70237fd --- /dev/null +++ b/templates/minimal/runtime/Cargo.toml @@ -0,0 +1,59 @@ +[package] +name = "minimal-template-runtime" +description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[dependencies] +parity-scale-codec = { version = "3.0.0", default-features = false } +scale-info = { version = "2.6.0", default-features = false } + +# this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
+frame = { path = "../../../substrate/frame", default-features = false, features = [ + "experimental", + "runtime", +] } + +# pallets that we want to use +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# genesis builder that allows us to interacto with runtime genesis config +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } + +# local pallet templates +pallet-minimal-template = { path = "../pallets/template", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "scale-info/std", + + "frame/std", + + "pallet-balances/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + + "pallet-minimal-template/std", + + "sp-genesis-builder/std", + "substrate-wasm-builder", +] diff --git a/substrate/bin/minimal/runtime/build.rs b/templates/minimal/runtime/build.rs similarity index 100% rename from substrate/bin/minimal/runtime/build.rs rename to templates/minimal/runtime/build.rs diff --git a/substrate/bin/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs similarity index 93% rename from substrate/bin/minimal/runtime/src/lib.rs rename to templates/minimal/runtime/src/lib.rs index 610289693d91b778295b3e48569fc887f805a204..91859cf4a2f57c47e9029ad0bc877f3393e80cf8 100644 --- a/substrate/bin/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -22,21 +22,24 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use frame::{ - deps::frame_support::weights::{FixedFee, NoFee}, + deps::frame_support::{ + genesis_builder_helper::{build_config, create_default_config}, + weights::{FixedFee, NoFee}, + }, prelude::*, runtime::{ apis::{ - self, impl_runtime_apis, ApplyExtrinsicResult, CheckInherentsResult, OpaqueMetadata, + self, impl_runtime_apis, ApplyExtrinsicResult, CheckInherentsResult, + ExtrinsicInclusionMode, OpaqueMetadata, }, prelude::*, }, }; -use frame_support::genesis_builder_helper::{build_config, create_default_config}; #[runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("minimal-runtime"), - impl_name: create_runtime_str!("minimal-runtime"), + spec_name: create_runtime_str!("minimal-template-runtime"), + impl_name: create_runtime_str!("minimal-template-runtime"), authoring_version: 1, spec_version: 0, impl_version: 1, @@ -70,6 +73,9 @@ construct_runtime!( Balances: pallet_balances, Sudo: pallet_sudo, TransactionPayment: pallet_transaction_payment, + + // our local pallet + Template: pallet_minimal_template, } ); @@ -103,6 +109,8 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = FixedFee<1, ::Balance>; } +impl pallet_minimal_template::Config for Runtime {} + type Block = frame::runtime::types_common::BlockOf; type Header = HeaderFor; @@ -121,7 +129,7 @@ impl_runtime_apis! 
{ RuntimeExecutive::execute_block(block) } - fn initialize_block(header: &Header) { + fn initialize_block(header: &Header) -> ExtrinsicInclusionMode { RuntimeExecutive::initialize_block(header) } } diff --git a/cumulus/parachain-template/LICENSE b/templates/parachain/LICENSE similarity index 100% rename from cumulus/parachain-template/LICENSE rename to templates/parachain/LICENSE diff --git a/cumulus/parachain-template/README.md b/templates/parachain/README.md similarity index 100% rename from cumulus/parachain-template/README.md rename to templates/parachain/README.md diff --git a/cumulus/parachain-template/node/Cargo.toml b/templates/parachain/node/Cargo.toml similarity index 75% rename from cumulus/parachain-template/node/Cargo.toml rename to templates/parachain/node/Cargo.toml index 77b484913b82425da112348cba340c0dc2041a8e..b4f3a504a4d9d7aebef6262d5677e6ff9967b8c2 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -1,26 +1,29 @@ [package] name = "parachain-template-node" -version = "0.1.0" -authors = ["Anonymous"] -description = "A new Cumulus FRAME-based Substrate Node, ready for hacking together a parachain." -license = "Unlicense" -homepage = "https://substrate.io" +description = "A parachain node template built with Substrate and Cumulus, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true -build = "build.rs" publish = false +build = "build.rs" [lints] workspace = true +# [[bin]] +# name = "parachain-template-node" + [dependencies] clap = { version = "4.5.1", features = ["derive"] } log = { workspace = true, default-features = true } codec = { package = "parity-scale-codec", version = "3.0.0" } -serde = { version = "1.0.196", features = ["derive"] } +serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } futures = "0.3.28" -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } # Local parachain-template-runtime = { path = "../runtime" } @@ -63,15 +66,15 @@ polkadot-primitives = { path = "../../../polkadot/primitives" } xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus -cumulus-client-cli = { path = "../../client/cli" } -cumulus-client-collator = { path = "../../client/collator" } -cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } -cumulus-client-consensus-common = { path = "../../client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } -cumulus-client-service = { path = "../../client/service" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface" } +cumulus-client-cli = { path = "../../../cumulus/client/cli" } +cumulus-client-collator = { path = "../../../cumulus/client/collator" } +cumulus-client-consensus-aura = { path = "../../../cumulus/client/consensus/aura" } +cumulus-client-consensus-common = { path = "../../../cumulus/client/consensus/common" } +cumulus-client-consensus-proposer = { path = "../../../cumulus/client/consensus/proposer" } +cumulus-client-service = { path = "../../../cumulus/client/service" } +cumulus-primitives-core = { path = "../../../cumulus/primitives/core" } 
+cumulus-primitives-parachain-inherent = { path = "../../../cumulus/primitives/parachain-inherent" } +cumulus-relay-chain-interface = { path = "../../../cumulus/client/relay-chain-interface" } color-print = "0.3.4" [build-dependencies] diff --git a/cumulus/parachain-template/node/build.rs b/templates/parachain/node/build.rs similarity index 100% rename from cumulus/parachain-template/node/build.rs rename to templates/parachain/node/build.rs diff --git a/cumulus/parachain-template/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs similarity index 93% rename from cumulus/parachain-template/node/src/chain_spec.rs rename to templates/parachain/node/src/chain_spec.rs index a79c78699c07102aca27ed06c73f93ccf86ed3ce..16c91865cdb4aa9ae3c5882652100f5ef56a988c 100644 --- a/cumulus/parachain-template/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -1,5 +1,6 @@ use cumulus_primitives_core::ParaId; -use parachain_template_runtime::{AccountId, AuraId, Signature, EXISTENTIAL_DEPOSIT}; +use parachain_template_runtime as runtime; +use runtime::{AccountId, AuraId, Signature, EXISTENTIAL_DEPOSIT}; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; use sc_service::ChainType; use serde::{Deserialize, Serialize}; @@ -56,8 +57,8 @@ where /// Generate the session keys from individual elements. /// /// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn template_session_keys(keys: AuraId) -> parachain_template_runtime::SessionKeys { - parachain_template_runtime::SessionKeys { aura: keys } +pub fn template_session_keys(keys: AuraId) -> runtime::SessionKeys { + runtime::SessionKeys { aura: keys } } pub fn development_config() -> ChainSpec { @@ -68,8 +69,7 @@ pub fn development_config() -> ChainSpec { properties.insert("ss58Format".into(), 42.into()); ChainSpec::builder( - parachain_template_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), + runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! @@ -120,8 +120,7 @@ pub fn local_testnet_config() -> ChainSpec { #[allow(deprecated)] ChainSpec::builder( - parachain_template_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), + runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! diff --git a/cumulus/parachain-template/node/src/cli.rs b/templates/parachain/node/src/cli.rs similarity index 99% rename from cumulus/parachain-template/node/src/cli.rs rename to templates/parachain/node/src/cli.rs index 73ef996b7504114b3578604a8d2c37661c9261fc..a5c55b8118a89d3dede620099a7447e3ab5dda6d 100644 --- a/cumulus/parachain-template/node/src/cli.rs +++ b/templates/parachain/node/src/cli.rs @@ -40,7 +40,7 @@ pub enum Subcommand { /// Try-runtime has migrated to a standalone /// [CLI](). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after Janurary 2024. + /// deprecation notice. It will be removed entirely some time after January 2024. 
TryRuntime,
}
diff --git a/cumulus/parachain-template/node/src/command.rs b/templates/parachain/node/src/command.rs
similarity index 98%
rename from cumulus/parachain-template/node/src/command.rs
rename to templates/parachain/node/src/command.rs
index 72b3ab7bb4b94735cdf4910b2cd1f46c22e7b16e..82624ae0be59e3159477b6f35b95f86177a35754 100644
--- a/cumulus/parachain-template/node/src/command.rs
+++ b/templates/parachain/node/src/command.rs
@@ -1,5 +1,6 @@
 use std::net::SocketAddr;
 
+use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions;
 use cumulus_primitives_core::ParaId;
 use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE};
 use log::info;
@@ -183,7 +184,7 @@ pub fn run() -> Result<()> {
 			match cmd {
 				BenchmarkCmd::Pallet(cmd) =>
 					if cfg!(feature = "runtime-benchmarks") {
-						runner.sync_run(|config| cmd.run::<HashingFor<Block>, ()>(config))
+						runner.sync_run(|config| cmd.run::<HashingFor<Block>, ReclaimHostFunctions>(config))
 					} else {
 						Err("Benchmarking wasn't enabled when building the node. \
 						You can enable it with `--features runtime-benchmarks`."
diff --git a/cumulus/parachain-template/node/src/main.rs b/templates/parachain/node/src/main.rs
similarity index 100%
rename from cumulus/parachain-template/node/src/main.rs
rename to templates/parachain/node/src/main.rs
diff --git a/cumulus/parachain-template/node/src/rpc.rs b/templates/parachain/node/src/rpc.rs
similarity index 100%
rename from cumulus/parachain-template/node/src/rpc.rs
rename to templates/parachain/node/src/rpc.rs
diff --git a/cumulus/parachain-template/node/src/service.rs b/templates/parachain/node/src/service.rs
similarity index 98%
rename from cumulus/parachain-template/node/src/service.rs
rename to templates/parachain/node/src/service.rs
index 830b6e82f969190b69425bd59a939c49372bf9f3..4dd24803e9b124d386d4912236ae1abd42229802 100644
--- a/cumulus/parachain-template/node/src/service.rs
+++ b/templates/parachain/node/src/service.rs
@@ -40,7 +40,10 @@ use substrate_prometheus_endpoint::Registry;
 pub struct ParachainNativeExecutor;
 
 impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
+	type ExtendHostFunctions = (
+		cumulus_client_service::storage_proof_size::HostFunctions,
+		frame_benchmarking::benchmarking::HostFunctions,
+	);
 
 	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
 		parachain_template_runtime::api::dispatch(method, data)
@@ -100,10 +103,11 @@ pub fn new_partial(config: &Configuration) -> Result<Service, sc_service::Error>
 	let executor = ParachainExecutor::new_with_wasm_executor(wasm);
 
 	let (client, backend, keystore_container, task_manager) =
-		sc_service::new_full_parts::<Block, RuntimeApi, _>(
+		sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>(
 			config,
 			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
 			executor,
+			true,
		)?;
 	let client = Arc::new(client);
diff --git a/cumulus/parachain-template/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml
similarity index 69%
rename from cumulus/parachain-template/pallets/template/Cargo.toml
rename to templates/parachain/pallets/template/Cargo.toml
index bd9d21d2dd29f41009b4a8bed4b326746fac1b2b..89eb9d5171630459e2e313ad94cee53e97921cac 100644
--- a/cumulus/parachain-template/pallets/template/Cargo.toml
+++ b/templates/parachain/pallets/template/Cargo.toml
@@ -1,12 +1,13 @@
 [package]
 name = "pallet-parachain-template"
-authors = ["Anonymous"]
 description = "FRAME pallet template for defining custom runtime logic."
-version = "0.7.0" -license = "Unlicense" -homepage = "https://substrate.io" +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true +publish = false [lints] workspace = true @@ -15,21 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } -# Substrate +# frame deps frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } [dev-dependencies] -serde = { version = "1.0.196" } - -# Substrate -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-core = { path = "../../../../substrate/primitives/core" } +sp-io = { path = "../../../../substrate/primitives/io" } +sp-runtime = { path = "../../../../substrate/primitives/runtime" } [features] default = ["std"] @@ -41,7 +43,7 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/substrate/bin/node-template/pallets/template/README.md b/templates/parachain/pallets/template/README.md similarity index 100% rename from substrate/bin/node-template/pallets/template/README.md rename to templates/parachain/pallets/template/README.md diff --git a/substrate/bin/node-template/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/benchmarking.rs rename to templates/parachain/pallets/template/src/benchmarking.rs diff --git a/cumulus/parachain-template/pallets/template/src/lib.rs b/templates/parachain/pallets/template/src/lib.rs similarity index 96% rename from cumulus/parachain-template/pallets/template/src/lib.rs rename to templates/parachain/pallets/template/src/lib.rs index 5f3252bfc3a70aa731bf572493a3797bfef574bb..11587d1df426f485139eab9e333b292768c6c571 100644 --- a/cumulus/parachain-template/pallets/template/src/lib.rs +++ b/templates/parachain/pallets/template/src/lib.rs @@ -11,6 +11,8 @@ mod mock; #[cfg(test)] mod tests; +pub mod weights; + #[cfg(feature = "runtime-benchmarks")] mod benchmarking; @@ -24,6 +26,8 @@ pub mod pallet { pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// A type representing the weights required by the dispatchables of this pallet. + type WeightInfo: crate::weights::WeightInfo; } #[pallet::pallet] @@ -32,7 +36,6 @@ pub mod pallet { // The pallet's runtime storage items. 
// https://docs.substrate.io/v3/runtime/storage
 	#[pallet::storage]
-	#[pallet::getter(fn something)]
 	// Learn more about declaring storage items:
 	// https://docs.substrate.io/v3/runtime/storage#declaring-storage-items
 	pub type Something<T> = StorageValue<_, u32>;
diff --git a/cumulus/parachain-template/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs
similarity index 98%
rename from cumulus/parachain-template/pallets/template/src/mock.rs
rename to templates/parachain/pallets/template/src/mock.rs
index 411a16b116c8f94757c29a686022842e159e6924..f510a8b773acf398ef2442254de25e75f867fcbb 100644
--- a/cumulus/parachain-template/pallets/template/src/mock.rs
+++ b/templates/parachain/pallets/template/src/mock.rs
@@ -51,6 +51,7 @@ impl system::Config for Test {
 
 impl crate::Config for Test {
 	type RuntimeEvent = RuntimeEvent;
+	type WeightInfo = ();
 }
 
 // Build genesis storage according to the mock runtime.
diff --git a/cumulus/parachain-template/pallets/template/src/tests.rs b/templates/parachain/pallets/template/src/tests.rs
similarity index 86%
rename from cumulus/parachain-template/pallets/template/src/tests.rs
rename to templates/parachain/pallets/template/src/tests.rs
index 527aec8ed00c058e5a9329a50d60267d7f9d1851..9ad3076be2cc9927063e1b50c293cafadf8f3361 100644
--- a/cumulus/parachain-template/pallets/template/src/tests.rs
+++ b/templates/parachain/pallets/template/src/tests.rs
@@ -1,4 +1,4 @@
-use crate::{mock::*, Error};
+use crate::{mock::*, Error, Something};
 use frame_support::{assert_noop, assert_ok};
 
 #[test]
@@ -7,7 +7,7 @@ fn it_works_for_default_value() {
 		// Dispatch a signed extrinsic.
 		assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42));
 		// Read pallet storage and assert an expected result.
-		assert_eq!(TemplateModule::something(), Some(42));
+		assert_eq!(Something::<Test>::get(), Some(42));
 	});
}
diff --git a/substrate/bin/node-template/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs
similarity index 100%
rename from substrate/bin/node-template/pallets/template/src/weights.rs
rename to templates/parachain/pallets/template/src/weights.rs
diff --git a/cumulus/parachain-template/polkadot-launch/config.json b/templates/parachain/polkadot-launch/config.json
similarity index 100%
rename from cumulus/parachain-template/polkadot-launch/config.json
rename to templates/parachain/polkadot-launch/config.json
diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml
similarity index 82%
rename from cumulus/parachain-template/runtime/Cargo.toml
rename to templates/parachain/runtime/Cargo.toml
index 44d96ffc4e628e5b48209e46fe39da1a75826043..9d9da2b4b97408ca53665470f3d911e0e8b7985b 100644
--- a/cumulus/parachain-template/runtime/Cargo.toml
+++ b/templates/parachain/runtime/Cargo.toml
@@ -1,12 +1,13 @@
 [package]
 name = "parachain-template-runtime"
-version = "0.7.0"
-authors = ["Anonymous"]
-description = "A new Cumulus FRAME-based Substrate Runtime, ready for hacking together a parachain."
-license = "Unlicense"
-homepage = "https://substrate.io"
+description = "A parachain runtime template built with Substrate and Cumulus, part of Polkadot Sdk."
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true +publish = false [lints] workspace = true @@ -18,16 +19,20 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ + "derive", +] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } smallvec = "1.11.0" # Local pallet-parachain-template = { path = "../pallets/template", default-features = false } -# Substrate +# Substrate / FRAME frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } @@ -35,6 +40,8 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } + +# FRAME Pallets pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } @@ -44,6 +51,8 @@ pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# Substrate Primitives sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } @@ -66,16 +75,19 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus -cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } -cumulus-pallet-session-benchmarking = { path = "../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false } 
-cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } -parachains-common = { path = "../../parachains/common", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } +cumulus-pallet-aura-ext = { path = "../../../cumulus/pallets/aura-ext", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false, features = [ + "parameterized-consensus-hook", +] } +cumulus-pallet-session-benchmarking = { path = "../../../cumulus/pallets/session-benchmarking", default-features = false } +cumulus-pallet-xcm = { path = "../../../cumulus/pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../cumulus/pallets/xcmp-queue", default-features = false } +cumulus-primitives-core = { path = "../../../cumulus/primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../cumulus/primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../cumulus/primitives/storage-weight-reclaim", default-features = false } +pallet-collator-selection = { path = "../../../cumulus/pallets/collator-selection", default-features = false } +parachains-common = { path = "../../../cumulus/parachains/common", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../cumulus/parachains/pallets/parachain-info", default-features = false } [features] default = ["std"] @@ -87,6 +99,7 @@ std = [ "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachain-template/runtime/build.rs b/templates/parachain/runtime/build.rs similarity index 100% rename from cumulus/parachain-template/runtime/build.rs rename to templates/parachain/runtime/build.rs diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs similarity index 98% rename from cumulus/parachain-template/runtime/src/lib.rs rename to templates/parachain/runtime/src/lib.rs index d9bc111fcef7f1932d4b52e3f0c9f42d0dccc6c0..89899d286bb74b1d16302f1a306456e7d490ae3a 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -107,6 +107,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. @@ -179,8 +180,8 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("template-parachain"), - impl_name: create_runtime_str!("template-parachain"), + spec_name: create_runtime_str!("parachain-template-runtime"), + impl_name: create_runtime_str!("parachain-template-runtime"), authoring_version: 1, spec_version: 1, impl_version: 0, @@ -488,6 +489,7 @@ impl pallet_collator_selection::Config for Runtime { /// Configure the pallet template in pallets/template. 
impl pallet_parachain_template::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
+	type WeightInfo = pallet_parachain_template::weights::SubstrateWeight<Runtime>;
 }
 
 // Create the runtime by composing the FRAME pallets that were previously configured.
@@ -559,7 +561,7 @@ impl_runtime_apis! {
 			Executive::execute_block(block)
 		}
 
-		fn initialize_block(header: &<Block as BlockT>::Header) {
+		fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 			Executive::initialize_block(header)
 		}
 	}
diff --git a/cumulus/parachain-template/runtime/src/weights/block_weights.rs b/templates/parachain/runtime/src/weights/block_weights.rs
similarity index 100%
rename from cumulus/parachain-template/runtime/src/weights/block_weights.rs
rename to templates/parachain/runtime/src/weights/block_weights.rs
diff --git a/cumulus/parachain-template/runtime/src/weights/extrinsic_weights.rs b/templates/parachain/runtime/src/weights/extrinsic_weights.rs
similarity index 100%
rename from cumulus/parachain-template/runtime/src/weights/extrinsic_weights.rs
rename to templates/parachain/runtime/src/weights/extrinsic_weights.rs
diff --git a/cumulus/parachain-template/runtime/src/weights/mod.rs b/templates/parachain/runtime/src/weights/mod.rs
similarity index 100%
rename from cumulus/parachain-template/runtime/src/weights/mod.rs
rename to templates/parachain/runtime/src/weights/mod.rs
diff --git a/cumulus/parachain-template/runtime/src/weights/paritydb_weights.rs b/templates/parachain/runtime/src/weights/paritydb_weights.rs
similarity index 100%
rename from cumulus/parachain-template/runtime/src/weights/paritydb_weights.rs
rename to templates/parachain/runtime/src/weights/paritydb_weights.rs
diff --git a/cumulus/parachain-template/runtime/src/weights/rocksdb_weights.rs b/templates/parachain/runtime/src/weights/rocksdb_weights.rs
similarity index 100%
rename from cumulus/parachain-template/runtime/src/weights/rocksdb_weights.rs
rename to templates/parachain/runtime/src/weights/rocksdb_weights.rs
diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/templates/parachain/runtime/src/xcm_config.rs
similarity index 100%
rename from cumulus/parachain-template/runtime/src/xcm_config.rs
rename to templates/parachain/runtime/src/xcm_config.rs
diff --git a/substrate/bin/node-template/LICENSE b/templates/solochain/LICENSE
similarity index 100%
rename from substrate/bin/node-template/LICENSE
rename to templates/solochain/LICENSE
diff --git a/substrate/bin/node-template/README.md b/templates/solochain/README.md
similarity index 100%
rename from substrate/bin/node-template/README.md
rename to templates/solochain/README.md
diff --git a/substrate/bin/node-template/docs/rust-setup.md b/templates/solochain/docs/rust-setup.md
similarity index 100%
rename from substrate/bin/node-template/docs/rust-setup.md
rename to templates/solochain/docs/rust-setup.md
diff --git a/substrate/bin/node-template/env-setup/README.md b/templates/solochain/env-setup/README.md
similarity index 100%
rename from substrate/bin/node-template/env-setup/README.md
rename to templates/solochain/env-setup/README.md
diff --git a/substrate/bin/node-template/env-setup/flake.lock b/templates/solochain/env-setup/flake.lock
similarity index 100%
rename from substrate/bin/node-template/env-setup/flake.lock
rename to templates/solochain/env-setup/flake.lock
diff --git a/substrate/bin/node-template/env-setup/flake.nix b/templates/solochain/env-setup/flake.nix
similarity index 100%
rename from substrate/bin/node-template/env-setup/flake.nix
rename to
templates/solochain/env-setup/flake.nix diff --git a/substrate/bin/node-template/env-setup/rust-toolchain.toml b/templates/solochain/env-setup/rust-toolchain.toml similarity index 100% rename from substrate/bin/node-template/env-setup/rust-toolchain.toml rename to templates/solochain/env-setup/rust-toolchain.toml diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..48bb511bfd10d711046415500b04c44bdcb9d654 --- /dev/null +++ b/templates/solochain/node/Cargo.toml @@ -0,0 +1,91 @@ +[package] +name = "solochain-template-node" +description = "A solochain node template built with Substrate, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +build = "build.rs" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +clap = { version = "4.5.1", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } +serde_json = { workspace = true, default-features = true } +jsonrpsee = { version = "0.22", features = ["server"] } + +# substrate client +sc-cli = { path = "../../../substrate/client/cli" } +sp-core = { path = "../../../substrate/primitives/core" } +sc-executor = { path = "../../../substrate/client/executor" } +sc-network = { path = "../../../substrate/client/network" } +sc-service = { path = "../../../substrate/client/service" } +sc-telemetry = { path = "../../../substrate/client/telemetry" } +sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } +sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } +sc-offchain = { path = "../../../substrate/client/offchain" } +sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } +sc-consensus = { path = "../../../substrate/client/consensus/common" } +sc-consensus-grandpa = { path = "../../../substrate/client/consensus/grandpa" } +sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } +sc-client-api = { path = "../../../substrate/client/api" } +sc-rpc-api = { path = "../../../substrate/client/rpc-api" } +sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } + +# substrate primitives +sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-io = { path = "../../../substrate/primitives/io" } +sp-timestamp = { path = "../../../substrate/primitives/timestamp" } +sp-inherents = { path = "../../../substrate/primitives/inherents" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } +sp-api = { path = "../../../substrate/primitives/api" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +sp-block-builder = { path = "../../../substrate/primitives/block-builder" } + +# frame and pallets +frame-system = { path = "../../../substrate/frame/system" } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc = { path = "../../../substrate/frame/transaction-payment/rpc" } +substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } + +# These dependencies are used for runtime benchmarking +frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } + +# Local 
Dependencies
+solochain-template-runtime = { path = "../runtime" }
+
+# CLI-specific dependencies
+try-runtime-cli = { path = "../../../substrate/utils/frame/try-runtime/cli", optional = true }
+
+[build-dependencies]
+substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" }
+
+[features]
+default = []
+# Dependencies that are only required if runtime benchmarking should be built.
+runtime-benchmarks = [
+	"frame-benchmarking-cli/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"sc-service/runtime-benchmarks",
+	"solochain-template-runtime/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+]
+# Enable features that allow the runtime to be tried and debugged. Name might be subject to change
+# in the near future.
+try-runtime = [
+	"frame-system/try-runtime",
+	"pallet-transaction-payment/try-runtime",
+	"solochain-template-runtime/try-runtime",
+	"sp-runtime/try-runtime",
+	"try-runtime-cli/try-runtime",
+]
diff --git a/substrate/bin/node-template/node/build.rs b/templates/solochain/node/build.rs
similarity index 100%
rename from substrate/bin/node-template/node/build.rs
rename to templates/solochain/node/build.rs
diff --git a/substrate/bin/node-template/node/src/benchmarking.rs b/templates/solochain/node/src/benchmarking.rs
similarity index 99%
rename from substrate/bin/node-template/node/src/benchmarking.rs
rename to templates/solochain/node/src/benchmarking.rs
index 6e29ad1a123118d953c1fad654bad9343fd8ca53..d1d8c2ccabaf64c730e14eb32e076052ae4a9e2b 100644
--- a/substrate/bin/node-template/node/src/benchmarking.rs
+++ b/templates/solochain/node/src/benchmarking.rs
@@ -4,10 +4,10 @@
 
 use crate::service::FullClient;
 
-use node_template_runtime as runtime;
 use runtime::{AccountId, Balance, BalancesCall, SystemCall};
 use sc_cli::Result;
 use sc_client_api::BlockBackend;
+use solochain_template_runtime as runtime;
 use sp_core::{Encode, Pair};
 use sp_inherents::{InherentData, InherentDataProvider};
 use sp_keyring::Sr25519Keyring;
diff --git a/substrate/bin/node-template/node/src/chain_spec.rs b/templates/solochain/node/src/chain_spec.rs
similarity index 97%
rename from substrate/bin/node-template/node/src/chain_spec.rs
rename to templates/solochain/node/src/chain_spec.rs
index 6e0d78f647a594d7e3c247041a7499691edaad1e..be49f2c1fc731ee4e5ece7841151068abf0f0790 100644
--- a/substrate/bin/node-template/node/src/chain_spec.rs
+++ b/templates/solochain/node/src/chain_spec.rs
@@ -1,5 +1,5 @@
-use node_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY};
 use sc_service::ChainType;
+use solochain_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY};
 use sp_consensus_aura::sr25519::AuthorityId as AuraId;
 use sp_consensus_grandpa::AuthorityId as GrandpaId;
 use sp_core::{sr25519, Pair, Public};
diff --git a/substrate/bin/node-template/node/src/cli.rs b/templates/solochain/node/src/cli.rs
similarity index 98%
rename from substrate/bin/node-template/node/src/cli.rs
rename to templates/solochain/node/src/cli.rs
index 98037eb886a8ec04ee78bc8f0b21b2269872d0fc..7f1b67b3696e6844f48c91b742a9017bff267d82 100644
--- a/substrate/bin/node-template/node/src/cli.rs
+++ b/templates/solochain/node/src/cli.rs
@@ -43,7 +43,7 @@ pub enum Subcommand {
 
 	/// Try-runtime has migrated to a standalone CLI
 	/// (). The subcommand exists as a stub and
-	/// deprecation notice. It will be removed entirely some time after Janurary 2024.
+	/// deprecation notice. It will be removed entirely some time after January 2024.
 	TryRuntime,
 
 	/// Db meta columns information.
diff --git a/substrate/bin/node-template/node/src/command.rs b/templates/solochain/node/src/command.rs
similarity index 98%
rename from substrate/bin/node-template/node/src/command.rs
rename to templates/solochain/node/src/command.rs
index 3778df6642294684780c347730eb580b69b54d65..42d1477f22f18579d49fb48fdc91df39b10aaa2b 100644
--- a/substrate/bin/node-template/node/src/command.rs
+++ b/templates/solochain/node/src/command.rs
@@ -5,9 +5,9 @@ use crate::{
 	service,
 };
 use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE};
-use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT};
 use sc_cli::SubstrateCli;
 use sc_service::PartialComponents;
+use solochain_template_runtime::{Block, EXISTENTIAL_DEPOSIT};
 use sp_keyring::Sr25519Keyring;
 
 impl SubstrateCli for Cli {
diff --git a/substrate/bin/node-template/node/src/main.rs b/templates/solochain/node/src/main.rs
similarity index 100%
rename from substrate/bin/node-template/node/src/main.rs
rename to templates/solochain/node/src/main.rs
diff --git a/substrate/bin/node-template/node/src/rpc.rs b/templates/solochain/node/src/rpc.rs
similarity index 96%
rename from substrate/bin/node-template/node/src/rpc.rs
rename to templates/solochain/node/src/rpc.rs
index 246391adcbbe88a03b6cf9cf9043d82b8de18b60..fe2b6ca72ede5c8d9f8d878db88efa2da5c5ac97 100644
--- a/substrate/bin/node-template/node/src/rpc.rs
+++ b/templates/solochain/node/src/rpc.rs
@@ -8,8 +8,8 @@ use std::sync::Arc;
 
 use jsonrpsee::RpcModule;
-use node_template_runtime::{opaque::Block, AccountId, Balance, Nonce};
 use sc_transaction_pool_api::TransactionPool;
+use solochain_template_runtime::{opaque::Block, AccountId, Balance, Nonce};
 use sp_api::ProvideRuntimeApi;
 use sp_block_builder::BlockBuilder;
 use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
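All of the node-side hunks above follow one mechanical pattern: the runtime crate was renamed from `node-template-runtime` to `solochain-template-runtime`, so `benchmarking.rs`, `chain_spec.rs`, `command.rs` and `rpc.rs` each swap a single `use` line. `benchmarking.rs` additionally shows the alias trick that keeps the rest of a file untouched; a minimal sketch (the free function below is hypothetical, the crate and item names are from the diff):

```rust
// Alias the renamed crate back to the short name used throughout the file,
// so every existing `runtime::...` path keeps compiling unchanged.
use solochain_template_runtime as runtime;

// Hypothetical call site: unaffected by the crate rename above.
fn existential_deposit() -> runtime::Balance {
	runtime::EXISTENTIAL_DEPOSIT
}
```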
diff --git a/substrate/bin/node-template/node/src/service.rs b/templates/solochain/node/src/service.rs
similarity index 94%
rename from substrate/bin/node-template/node/src/service.rs
rename to templates/solochain/node/src/service.rs
index 25cd651178411c6338a2b0d1e7836804a3a5b676..dc25f7579129fe3ea86d0686732be5e6baec40ba 100644
--- a/substrate/bin/node-template/node/src/service.rs
+++ b/templates/solochain/node/src/service.rs
@@ -1,13 +1,13 @@
 //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
 
 use futures::FutureExt;
-use node_template_runtime::{self, opaque::Block, RuntimeApi};
 use sc_client_api::{Backend, BlockBackend};
 use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
 use sc_consensus_grandpa::SharedVoterState;
 use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams};
 use sc_telemetry::{Telemetry, TelemetryWorker};
 use sc_transaction_pool_api::OffchainTransactionPoolFactory;
+use solochain_template_runtime::{self, opaque::Block, RuntimeApi};
 use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
 use std::{sync::Arc, time::Duration};
@@ -80,23 +80,29 @@ pub fn new_partial(config: &Configuration) -> Result<Service, ServiceError> {
 		telemetry.as_ref().map(|x| x.handle()),
 	)?;
 
-	let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
-
+	let cidp_client = client.clone();
 	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _>(ImportQueueParams {
 		block_import: grandpa_block_import.clone(),
 		justification_import: Some(Box::new(grandpa_block_import.clone())),
 		client: client.clone(),
-		create_inherent_data_providers: move |_, ()| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+		create_inherent_data_providers: move |parent_hash, _| {
+			let cidp_client = cidp_client.clone();
+			async move {
+				let slot_duration = sc_consensus_aura::standalone::slot_duration_at(
+					&*cidp_client,
+					parent_hash,
+				)?;
+				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
 
-			let slot =
-				sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
-					*timestamp,
-					slot_duration,
-				);
+				let slot =
+					sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
+						*timestamp,
+						slot_duration,
+					);
 
-			Ok((slot, timestamp))
+				Ok((slot, timestamp))
+			}
 		},
 		spawner: &task_manager.spawn_essential_handle(),
 		registry: config.prometheus_registry(),
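The `service.rs` hunk above is the one behavioral change in this set of files: previously the Aura slot duration was read once via `sc_consensus_aura::slot_duration(&*client)` at node startup and captured by the `create_inherent_data_providers` closure; now the closure clones the client and resolves the slot duration against each block's parent hash, so a runtime upgrade that changes the slot duration takes effect without restarting the node. Reduced to its essentials (a fragment of the closure from the hunk, not a standalone program):

```rust
// Per-block inherent data providers: the slot duration is looked up at the
// parent block instead of being cached once at node startup.
create_inherent_data_providers: move |parent_hash, _| {
	let client = cidp_client.clone();
	async move {
		// Queries the runtime's Aura API at `parent_hash`.
		let slot_duration =
			sc_consensus_aura::standalone::slot_duration_at(&*client, parent_hash)?;
		let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
		let slot =
			sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
				*timestamp,
				slot_duration,
			);
		Ok((slot, timestamp))
	}
},
```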
-authors = ["Substrate DevHub "] -homepage = "https://substrate.io" -edition.workspace = true +version = "0.0.0" license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" [lints] workspace = true @@ -19,16 +19,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../frame/support", default-features = false } -frame-system = { path = "../../../../frame/system", default-features = false } -sp-std = { path = "../../../../primitives/std", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } + +# frame deps +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } [dev-dependencies] -sp-core = { path = "../../../../primitives/core" } -sp-io = { path = "../../../../primitives/io" } -sp-runtime = { path = "../../../../primitives/runtime" } +sp-core = { path = "../../../../substrate/primitives/core" } +sp-io = { path = "../../../../substrate/primitives/io" } +sp-runtime = { path = "../../../../substrate/primitives/runtime" } [features] default = ["std"] @@ -41,7 +44,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/templates/solochain/pallets/template/README.md b/templates/solochain/pallets/template/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e4dc55267d69c47fff971cb0427bcb2e0ff871c --- /dev/null +++ b/templates/solochain/pallets/template/README.md @@ -0,0 +1 @@ +License: MIT-0 diff --git a/templates/solochain/pallets/template/src/benchmarking.rs b/templates/solochain/pallets/template/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..5a262417629c579c6ecf5ada30ae803217623766 --- /dev/null +++ b/templates/solochain/pallets/template/src/benchmarking.rs @@ -0,0 +1,35 @@ +//! 
+#![cfg(feature = "runtime-benchmarks")]
+use super::*;
+
+#[allow(unused)]
+use crate::Pallet as Template;
+use frame_benchmarking::v2::*;
+use frame_system::RawOrigin;
+
+#[benchmarks]
+mod benchmarks {
+	use super::*;
+
+	#[benchmark]
+	fn do_something() {
+		let value = 100u32.into();
+		let caller: T::AccountId = whitelisted_caller();
+		#[extrinsic_call]
+		do_something(RawOrigin::Signed(caller), value);
+
+		assert_eq!(Something::<T>::get(), Some(value));
+	}
+
+	#[benchmark]
+	fn cause_error() {
+		Something::<T>::put(100u32);
+		let caller: T::AccountId = whitelisted_caller();
+		#[extrinsic_call]
+		cause_error(RawOrigin::Signed(caller));
+
+		assert_eq!(Something::<T>::get(), Some(101u32));
+	}
+
+	impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
+}
diff --git a/substrate/bin/node-template/pallets/template/src/lib.rs b/templates/solochain/pallets/template/src/lib.rs
similarity index 98%
rename from substrate/bin/node-template/pallets/template/src/lib.rs
rename to templates/solochain/pallets/template/src/lib.rs
index 4a2e53baa774ee5ca333602024d458aa078f9c10..90dfe370145856cc08483cdbce2bbc542b21b2b9 100644
--- a/substrate/bin/node-template/pallets/template/src/lib.rs
+++ b/templates/solochain/pallets/template/src/lib.rs
@@ -90,9 +90,7 @@ pub mod pallet {
 	///
 	/// In this template, we are declaring a storage item called `Something` that stores a single
 	/// `u32` value. Learn more about runtime storage here:
 	/// <https://docs.substrate.io/build/runtime-storage/>
-	/// The [`getter`] macro generates a function to conveniently retrieve the value from storage.
 	#[pallet::storage]
-	#[pallet::getter(fn something)]
 	pub type Something<T> = StorageValue<_, u32>;
 	/// Events that functions in this pallet can emit.
@@ -187,7 +185,7 @@ pub mod pallet {
 			let _who = ensure_signed(origin)?;
 
 			// Read a value from storage.
-			match Pallet::<T>::something() {
+			match Something::<T>::get() {
 				// Return an error if the value has not been set.
 				None => Err(Error::<T>::NoneValue.into()),
 				Some(old) => {
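The `lib.rs` hunk drops the `#[pallet::getter]` attribute: instead of the generated `Pallet::<T>::something()` accessor, callers read the storage item directly, which is also what the tests and benchmarks above were updated to do. The before/after in miniature (names from the pallet above):

```rust
// Storage declaration, now without the getter attribute.
#[pallet::storage]
pub type Something<T> = StorageValue<_, u32>;

// Old, via the generated getter:  let v = Pallet::<T>::something();
// New, direct storage access:     let v = Something::<T>::get();
```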
diff --git a/substrate/bin/node-template/pallets/template/src/mock.rs b/templates/solochain/pallets/template/src/mock.rs
similarity index 100%
rename from substrate/bin/node-template/pallets/template/src/mock.rs
rename to templates/solochain/pallets/template/src/mock.rs
diff --git a/substrate/bin/node-template/pallets/template/src/tests.rs b/templates/solochain/pallets/template/src/tests.rs
similarity index 88%
rename from substrate/bin/node-template/pallets/template/src/tests.rs
rename to templates/solochain/pallets/template/src/tests.rs
index 7c2b853ee4dc56fdf526a17557fe37281a50e803..83e4bea7377b348d8ac1d389813a788f79b81970 100644
--- a/substrate/bin/node-template/pallets/template/src/tests.rs
+++ b/templates/solochain/pallets/template/src/tests.rs
@@ -1,4 +1,4 @@
-use crate::{mock::*, Error, Event};
+use crate::{mock::*, Error, Event, Something};
 use frame_support::{assert_noop, assert_ok};
 
 #[test]
@@ -9,7 +9,7 @@ fn it_works_for_default_value() {
 		// Dispatch a signed extrinsic.
 		assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42));
 		// Read pallet storage and assert an expected result.
-		assert_eq!(TemplateModule::something(), Some(42));
+		assert_eq!(Something::<Test>::get(), Some(42));
 		// Assert that the correct event was deposited
 		System::assert_last_event(Event::SomethingStored { something: 42, who: 1 }.into());
 	});
diff --git a/templates/solochain/pallets/template/src/weights.rs b/templates/solochain/pallets/template/src/weights.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7c42936e09f292de831d28460a3bc39436c3323f
--- /dev/null
+++ b/templates/solochain/pallets/template/src/weights.rs
@@ -0,0 +1,90 @@
+
+//! Autogenerated weights for pallet_template
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: ``
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
+
+// Executed Command:
+// ../../target/release/node-template
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_template
+// --extrinsic
+// *
+// --steps=50
+// --repeat=20
+// --wasm-execution=compiled
+// --output
+// pallets/template/src/weights.rs
+// --template
+// ../../.maintain/frame-weight-template.hbs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for pallet_template.
+pub trait WeightInfo {
+	fn do_something() -> Weight;
+	fn cause_error() -> Weight;
+}
+
+/// Weights for pallet_template using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: TemplateModule Something (r:0 w:1)
+	/// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	fn do_something() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 8_000_000 picoseconds.
+		Weight::from_parts(9_000_000, 0)
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: TemplateModule Something (r:1 w:1)
+	/// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	fn cause_error() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `32`
+		//  Estimated: `1489`
+		// Minimum execution time: 6_000_000 picoseconds.
+		Weight::from_parts(6_000_000, 1489)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+}
+
+// For backwards compatibility and tests
+impl WeightInfo for () {
+	/// Storage: TemplateModule Something (r:0 w:1)
+	/// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	fn do_something() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 8_000_000 picoseconds.
+		Weight::from_parts(9_000_000, 0)
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: TemplateModule Something (r:1 w:1)
+	/// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	fn cause_error() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `32`
+		//  Estimated: `1489`
+		// Minimum execution time: 6_000_000 picoseconds.
+		Weight::from_parts(6_000_000, 1489)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+}
diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..a7fc0519ba20e319d093833e4f099457e6999785
--- /dev/null
+++ b/templates/solochain/runtime/Cargo.toml
@@ -0,0 +1,151 @@
+[package]
+name = "solochain-template-runtime"
+description = "A solochain runtime template built with Substrate, part of Polkadot SDK."
+version = "0.0.0"
+license = "MIT-0"
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+edition.workspace = true
+publish = false
+
+[lints]
+workspace = true
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+	"derive",
+] }
+scale-info = { version = "2.10.0", default-features = false, features = [
+	"derive",
+	"serde",
+] }
+
+# frame
+frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental"] }
+frame-system = { path = "../../../substrate/frame/system", default-features = false }
+frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true }
+frame-executive = { path = "../../../substrate/frame/executive", default-features = false }
+
+# frame pallets
+pallet-aura = { path = "../../../substrate/frame/aura", default-features = false }
+pallet-balances = { path = "../../../substrate/frame/balances", default-features = false }
+pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false }
+pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false }
+pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false }
+pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false }
+
+# primitives
+sp-api = { path = "../../../substrate/primitives/api", default-features = false }
+sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false }
+sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false, features = [
+	"serde",
+] }
+sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = [
+	"serde",
+] }
+sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = [
+	"serde",
+] }
+sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false }
+sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false }
+sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = [
+	"serde",
+] }
+sp-session = { path = "../../../substrate/primitives/session", default-features = false }
+sp-std = { path = "../../../substrate/primitives/std", default-features = false }
+sp-storage = { path = "../../../substrate/primitives/storage", default-features = false }
+sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false }
+sp-version = { path = "../../../substrate/primitives/version", default-features = false, features = [
+	"serde",
+] }
+sp-genesis-builder = { default-features = false, path = "../../../substrate/primitives/genesis-builder" }
"../../../substrate/primitives/genesis-builder" } + +# RPC related +frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# Used for runtime benchmarking +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } + +# The pallet in this template. +pallet-template = { path = "../pallets/template", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + + "frame-benchmarking?/std", + "frame-try-runtime?/std", + + "pallet-aura/std", + "pallet-balances/std", + "pallet-grandpa/std", + "pallet-sudo/std", + "pallet-template/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-consensus-grandpa/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + + "substrate-wasm-builder", +] + +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-grandpa/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-template/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-balances/try-runtime", + "pallet-grandpa/try-runtime", + "pallet-sudo/try-runtime", + "pallet-template/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "sp-runtime/try-runtime", +] + +experimental = ["pallet-aura/experimental"] diff --git a/substrate/bin/node-template/runtime/build.rs b/templates/solochain/runtime/build.rs similarity index 100% rename from substrate/bin/node-template/runtime/build.rs rename to templates/solochain/runtime/build.rs diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs similarity index 94% rename from substrate/bin/node-template/runtime/src/lib.rs rename to templates/solochain/runtime/src/lib.rs index 3b6a74be2512c6214b2fadada68bb22d746f07a9..a8a5b7857e13a90b98aa701c1fca71182cf2da8d 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -1,8 +1,5 @@ #![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] -// Make the WASM binary available. 
diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs
similarity index 94%
rename from substrate/bin/node-template/runtime/src/lib.rs
rename to templates/solochain/runtime/src/lib.rs
index 3b6a74be2512c6214b2fadada68bb22d746f07a9..a8a5b7857e13a90b98aa701c1fca71182cf2da8d 100644
--- a/substrate/bin/node-template/runtime/src/lib.rs
+++ b/templates/solochain/runtime/src/lib.rs
@@ -1,8 +1,5 @@
 #![cfg_attr(not(feature = "std"), no_std)]
-// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
-#![recursion_limit = "256"]
-// Make the WASM binary available.
 #[cfg(feature = "std")]
 include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
@@ -22,7 +19,6 @@ use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
 
 use frame_support::genesis_builder_helper::{build_config, create_default_config};
-// A few exports that help ease life for downstream crates.
 pub use frame_support::{
 	construct_runtime, derive_impl, parameter_types,
 	traits::{
@@ -95,8 +91,8 @@ pub mod opaque {
 // https://docs.substrate.io/main-docs/build/upgrade#runtime-versioning
 #[sp_version::runtime_version]
 pub const VERSION: RuntimeVersion = RuntimeVersion {
-	spec_name: create_runtime_str!("node-template"),
-	impl_name: create_runtime_str!("node-template"),
+	spec_name: create_runtime_str!("solochain-template-runtime"),
+	impl_name: create_runtime_str!("solochain-template-runtime"),
 	authoring_version: 1,
 	// The version of the runtime specification. A full node will not attempt to use its native
 	// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`,
@@ -256,19 +252,47 @@ impl pallet_template::Config for Runtime {
 }
 
 // Create the runtime by composing the FRAME pallets that were previously configured.
-construct_runtime!(
-	pub enum Runtime {
-		System: frame_system,
-		Timestamp: pallet_timestamp,
-		Aura: pallet_aura,
-		Grandpa: pallet_grandpa,
-		Balances: pallet_balances,
-		TransactionPayment: pallet_transaction_payment,
-		Sudo: pallet_sudo,
-		// Include the custom logic from the pallet-template in the runtime.
-		TemplateModule: pallet_template,
-	}
-);
+#[frame_support::runtime]
+mod runtime {
+	#[runtime::runtime]
+	#[runtime::derive(
+		RuntimeCall,
+		RuntimeEvent,
+		RuntimeError,
+		RuntimeOrigin,
+		RuntimeFreezeReason,
+		RuntimeHoldReason,
+		RuntimeSlashReason,
+		RuntimeLockId,
+		RuntimeTask
+	)]
+	pub struct Runtime;
+
+	#[runtime::pallet_index(0)]
+	pub type System = frame_system;
+
+	#[runtime::pallet_index(1)]
+	pub type Timestamp = pallet_timestamp;
+
+	#[runtime::pallet_index(2)]
+	pub type Aura = pallet_aura;
+
+	#[runtime::pallet_index(3)]
+	pub type Grandpa = pallet_grandpa;
+
+	#[runtime::pallet_index(4)]
+	pub type Balances = pallet_balances;
+
+	#[runtime::pallet_index(5)]
+	pub type TransactionPayment = pallet_transaction_payment;
+
+	#[runtime::pallet_index(6)]
+	pub type Sudo = pallet_sudo;
+
+	// Include the custom logic from the pallet-template in the runtime.
+	#[runtime::pallet_index(7)]
+	pub type TemplateModule = pallet_template;
+}
 
 /// The address format for describing accounts.
 pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
@@ -331,7 +355,7 @@ impl_runtime_apis! {
 			Executive::execute_block(block);
 		}
 
-		fn initialize_block(header: &<Block as BlockT>::Header) {
+		fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
 			Executive::initialize_block(header)
 		}
 	}
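Two API shifts land in this final hunk set. First, the declarative `construct_runtime!` list is replaced by the module-based `#[frame_support::runtime]` syntax, in which every pallet carries an explicit, stable index; extending the runtime later means appending one more indexed type alias, for example (pallet name hypothetical):

```rust
// Hypothetical addition to the `mod runtime` block above: a new pallet
// takes the next free index, leaving all existing indices untouched.
#[runtime::pallet_index(8)]
pub type MyPallet = pallet_my_pallet;
```

Second, `Core::initialize_block` now returns `sp_runtime::ExtrinsicInclusionMode` rather than `()`, so the runtime can tell the block author whether ordinary extrinsics may be pushed into the block being initialized, as opposed to inherents only. `Executive::initialize_block` already returns that value, which is why the body of the template's implementation is unchanged.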